Immutable branch between MFD, Extcon and I2C due for the v6.3 merge window

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdrbJNaO+IJqU8IdIUa+KL4f8d2EFAmPeWnwACgkQUa+KL4f8
 d2HgAxAAqqeW0yFhG7LXxcySFplGOX5LV0DTz5HrASwdk6yC1e+PUSjbVxaPgbbT
 Vzz/Efw3VbZvlDEN1EGAu+V4NTmKpscvAFk4n2bNxcTVa8MtGUpZchLaPCbU1IQG
 LBhIbL1E++L3qqavlOzrGpie8fRt9aPz1hqOXblSmLNEMq7hbeA2ebFa72Ofy7f9
 lkbGgv32/l9D7IRU/tEGPXvrw3J1V70YaU+wlmLR9YXlxIoG+VDrvi45Bvv1U7mU
 UAkw3jxxhSSFLATvw0VcBHUZQ+XKY7Ymiu5pA99psfA7jxb5PLeDcSMwyCyI6eHj
 9Uy/uhrlwtvR1zXHbWAY7MhS6ickEnplRL87xo5mNmcxl1lMY/iVLC5JVa8Os9mj
 e+NBdWzhzgM84QJsZBNSe6wuWKVrsvDqwA/D3lEoAHeaaKIBkLpSi+NS7rfzBb76
 YdhujAh5cuXgl2FNjw4tNv1XIWJ6aDFhE9CvXTTAiOngmqgy6ZRHK5Ceo4VhCaBD
 Hsa1R0glmfU3H7tEh7Y/VKDxLDaKTBtgeCF+mXFoKUU89mv6ypx/H5e2WR4kfdvn
 5zKrbecZeaKvVphzkJxE2DO5yRIEQ1yLKDEMfMPNTV5ZQOYoGs/Cqli38L2p1Zk4
 h0EDL3HIAUF4+Nb/qQtmDXeHpetRmwOhEVX4LMHvlRhLwBEYUBk=
 =Z4m+
 -----END PGP SIGNATURE-----
mergetag object acf63c458b
 type commit
 tag ib-mfd-fpga-hwmon-v6.3-1
 tagger Lee Jones <lee@kernel.org> 1675066458 +0000
 
 Immutable branch between MFD, FPGA and HWMON due for the v6.3 merge window
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdrbJNaO+IJqU8IdIUa+KL4f8d2EFAmPXfGkACgkQUa+KL4f8
 d2G0VA//Vb5S4oYYQ4e24ui+U86EFUmaGZVb4Isl36y+wG8/usguA3Joryw+JlzB
 lDWaOD8XzxKA74XHM9Gj1RfLO60g5tEvbfClXJhEvxxCYKhElgCPNqpp9cUa+Uws
 lITFMjTfoUWMPyiiWIqw0F7x2Li4OQ4+EjScXDLpxjx2RzYtLu4mOo5cJqnBQVOs
 p+x5asFGv0u7dyFJr/vZOCR6bFb8SGlqNs46iEXB29H1hF3Rm5ag0nH+BTY8WG3k
 aMtEo72UtVGXGuK6pClzpUgbcVAd+KRNouFS6ZXMXDLpMWstFCetHSoh/XJ+sXMB
 Tiv6yG9t1QUcS0H/arWpFTce7Bp9aBcHDfeS++a3m9+gRYUtmpLcasaiR7udUxee
 Q7LT8CUGJ1KqzyGg9k76vaPaAKyvpPeQY8ozVrmmhi9Ms1d6+XhPmy1uM5oAwsTP
 BvO2SRwvxPvlAgCxbtYXxx+O0rgbMvniS1F9JoGJ09nEiChKSgMvw8yZZLkeh8Sy
 qCu4IRno0Ns77PJ6r0pN/AFzXVvx5BFBh2YUAOgpUHfy8Bq03vxI4/6deOkps3p0
 AMQPQGwh4qGvJu7WxXQ4Acu8W1h4piSpqkxOdhlElNwK6VLXAP/ffyVsV4tm7iqn
 EJzM7Kq8OH6rzs9nl/bueB0E5ZSn6gOHhXrojL+znno22EXhL/o=
 =JbTj
 -----END PGP SIGNATURE-----

Merge tags 'ib-mfd-extcon-i2c-v6.3' and 'ib-mfd-fpga-hwmon-v6.3-1' into ibs-for-mfd-merged

Immutable branch between MFD, Extcon and I2C due for the v6.3 merge window

Immutable branch between MFD, FPGA and HWMON due for the v6.3 merge window
commit 28e1958009
Lee Jones <lee@kernel.org>, 2023-02-22 08:25:03 +00:00
12 changed files with 1221 additions and 433 deletions

File: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc

@ -1,4 +1,4 @@
What: /sys/bus/spi/devices/.../bmc_version
What: /sys/bus/.../drivers/intel-m10-bmc/.../bmc_version
Date: June 2020
KernelVersion: 5.10
Contact: Xu Yilun <yilun.xu@intel.com>
@ -6,7 +6,7 @@ Description: Read only. Returns the hardware build version of Intel
MAX10 BMC chip.
Format: "0x%x".
What: /sys/bus/spi/devices/.../bmcfw_version
What: /sys/bus/.../drivers/intel-m10-bmc/.../bmcfw_version
Date: June 2020
KernelVersion: 5.10
Contact: Xu Yilun <yilun.xu@intel.com>
@ -14,7 +14,7 @@ Description: Read only. Returns the firmware version of Intel MAX10
BMC chip.
Format: "0x%x".
What: /sys/bus/spi/devices/.../mac_address
What: /sys/bus/.../drivers/intel-m10-bmc/.../mac_address
Date: January 2021
KernelVersion: 5.12
Contact: Russ Weight <russell.h.weight@intel.com>
@ -25,7 +25,7 @@ Description: Read only. Returns the first MAC address in a block
space.
Format: "%02x:%02x:%02x:%02x:%02x:%02x".
What: /sys/bus/spi/devices/.../mac_count
What: /sys/bus/.../drivers/intel-m10-bmc/.../mac_count
Date: January 2021
KernelVersion: 5.12
Contact: Russ Weight <russell.h.weight@intel.com>
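
These attributes can be read like any other sysfs file. A minimal userspace sketch (illustrative only; the concrete attribute path is passed on the command line, since it depends on the bus and device instance):

#include <stdio.h>

int main(int argc, char **argv)
{
	char buf[32];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <sysfs attribute path>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "r");	/* e.g. .../bmc_version or .../mac_count */
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf);	/* bmc_version/bmcfw_version print as "0x%x" */

	fclose(f);
	return 0;
}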

File: MAINTAINERS

@ -10576,7 +10576,7 @@ S: Maintained
F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
F: Documentation/hwmon/intel-m10-bmc-hwmon.rst
F: drivers/hwmon/intel-m10-bmc-hwmon.c
F: drivers/mfd/intel-m10-bmc.c
F: drivers/mfd/intel-m10-bmc*
F: include/linux/mfd/intel-m10-bmc.h
INTEL MENLOW THERMAL DRIVER

File: drivers/fpga/Kconfig

@ -246,7 +246,7 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE
tristate "Intel MAX10 BMC Secure Update driver"
depends on MFD_INTEL_M10_BMC
depends on MFD_INTEL_M10_BMC_CORE
select FW_LOADER
select FW_UPLOAD
help

File: drivers/fpga/intel-m10-bmc-sec-update.c

@ -14,6 +14,12 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
struct m10bmc_sec;
struct m10bmc_sec_ops {
int (*rsu_status)(struct m10bmc_sec *sec);
};
struct m10bmc_sec {
struct device *dev;
struct intel_m10bmc *m10bmc;
@ -21,6 +27,7 @@ struct m10bmc_sec {
char *fw_name;
u32 fw_name_id;
bool cancel_request;
const struct m10bmc_sec_ops *ops;
};
static DEFINE_XARRAY_ALLOC(fw_upload_xa);
@ -31,6 +38,71 @@ static DEFINE_XARRAY_ALLOC(fw_upload_xa);
#define REH_MAGIC GENMASK(15, 0)
#define REH_SHA_NUM_BYTES GENMASK(31, 16)
static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
{
struct intel_m10bmc *m10bmc = sec->m10bmc;
unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
u32 write_count = size / stride;
u32 leftover_offset = write_count * stride;
u32 leftover_size = size - leftover_offset;
u32 leftover_tmp = 0;
int ret;
if (sec->m10bmc->flash_bulk_ops)
return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);
if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
return -EINVAL;
ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
buf + offset, write_count);
if (ret)
return ret;
/* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
if (leftover_size) {
memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
leftover_tmp);
if (ret)
return ret;
}
return 0;
}
static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
{
struct intel_m10bmc *m10bmc = sec->m10bmc;
unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
u32 read_count = size / stride;
u32 leftover_offset = read_count * stride;
u32 leftover_size = size - leftover_offset;
u32 leftover_tmp;
int ret;
if (sec->m10bmc->flash_bulk_ops)
return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);
if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
return -EINVAL;
ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
if (ret)
return ret;
/* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
if (leftover_size) {
ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
if (ret)
return ret;
memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
}
return 0;
}
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
u32 prog_addr, u32 reh_addr, char *buf)
@ -38,11 +110,9 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
struct m10bmc_sec *sec = dev_get_drvdata(dev);
int sha_num_bytes, i, ret, cnt = 0;
u8 hash[REH_SHA384_SIZE];
unsigned int stride;
u32 magic;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
if (ret)
return ret;
@ -50,19 +120,16 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return sysfs_emit(buf, "hash not programmed\n");
sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
if ((sha_num_bytes % stride) ||
(sha_num_bytes != REH_SHA256_SIZE &&
sha_num_bytes != REH_SHA384_SIZE)) {
if (sha_num_bytes != REH_SHA256_SIZE &&
sha_num_bytes != REH_SHA384_SIZE) {
dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
sha_num_bytes);
return -EINVAL;
}
ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
hash, sha_num_bytes / stride);
ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
if (ret) {
dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
reh_addr, sha_num_bytes / stride, ret);
dev_err(dev, "failed to read root entry hash\n");
return ret;
}
@ -73,16 +140,24 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return cnt;
}
#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
{ \
struct m10bmc_sec *sec = dev_get_drvdata(dev); \
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
return show_root_entry_hash(dev, csr_map->_name##_magic, \
csr_map->_name##_prog_addr, \
csr_map->_name##_reh_addr, \
buf); \
} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)
DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(bmc);
DEVICE_ATTR_SEC_REH_RO(sr);
DEVICE_ATTR_SEC_REH_RO(pr);
#define CSK_BIT_LEN 128U
#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
@ -90,27 +165,16 @@ DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
struct m10bmc_sec *sec = dev_get_drvdata(dev);
DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
__le32 csk_le32[CSK_32ARRAY_SIZE];
u32 csk32[CSK_32ARRAY_SIZE];
int ret;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (size % stride) {
dev_err(sec->dev,
"CSK vector size (0x%x) not aligned to stride (0x%x)\n",
size, stride);
WARN_ON_ONCE(1);
return -EINVAL;
}
ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
size / stride);
ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
if (ret) {
dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
addr, size / stride, ret);
dev_err(sec->dev, "failed to read CSK vector\n");
return ret;
}
@ -122,18 +186,25 @@ show_canceled_csk(struct device *dev, u32 addr, char *buf)
return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ return show_canceled_csk(dev, _addr, buf); } \
{ \
struct m10bmc_sec *sec = dev_get_drvdata(dev); \
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
return show_canceled_csk(dev, \
csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
buf); \
} \
static DEVICE_ATTR_RO(_name##_canceled_csks)
#define CSK_VEC_OFFSET 0x34
DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(bmc);
DEVICE_ATTR_SEC_CSK_RO(sr);
DEVICE_ATTR_SEC_CSK_RO(pr);
#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
@ -141,31 +212,21 @@ static ssize_t flash_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct m10bmc_sec *sec = dev_get_drvdata(dev);
unsigned int stride, num_bits;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned int num_bits;
u8 *flash_buf;
int cnt, ret;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
if (FLASH_COUNT_SIZE % stride) {
dev_err(sec->dev,
"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
FLASH_COUNT_SIZE, stride);
WARN_ON_ONCE(1);
return -EINVAL;
}
flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
if (!flash_buf)
return -ENOMEM;
ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
flash_buf, FLASH_COUNT_SIZE / stride);
ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
FLASH_COUNT_SIZE);
if (ret) {
dev_err(sec->dev,
"failed to read flash count: %x cnt %x: %d\n",
STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
dev_err(sec->dev, "failed to read flash count\n");
goto exit_free;
}
cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
@ -200,25 +261,94 @@ static const struct attribute_group *m10bmc_sec_attr_groups[] = {
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 auth_result;
dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}
static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return ret;
return FIELD_GET(DRBL_RSU_STATUS, doorbell);
}
static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 auth_result;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
if (ret)
return ret;
return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
}
static bool rsu_status_ok(u32 status)
{
return (status == RSU_STAT_NORMAL ||
status == RSU_STAT_NIOS_OK ||
status == RSU_STAT_USER_OK ||
status == RSU_STAT_FACTORY_OK);
}
static bool rsu_progress_done(u32 progress)
{
return (progress == RSU_PROG_IDLE ||
progress == RSU_PROG_RSU_DONE);
}
static bool rsu_progress_busy(u32 progress)
{
return (progress == RSU_PROG_AUTHENTICATING ||
progress == RSU_PROG_COPYING ||
progress == RSU_PROG_UPDATE_CANCEL ||
progress == RSU_PROG_PROGRAM_KEY_HASH);
}
static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
u32 *progress, u32 *status)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
if (ret)
return ret;
ret = sec->ops->rsu_status(sec);
if (ret < 0)
return ret;
*status = ret;
*progress = rsu_prog(*doorbell_reg);
return 0;
}
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
if (!rsu_progress_done(rsu_prog(doorbell))) {
log_error_regs(sec, doorbell);
return FW_UPLOAD_ERR_BUSY;
}
@ -226,19 +356,15 @@ static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_NONE;
}
static inline bool rsu_start_done(u32 doorbell)
static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
u32 status, progress;
if (doorbell & DRBL_RSU_REQUEST)
if (doorbell_reg & DRBL_RSU_REQUEST)
return false;
status = rsu_stat(doorbell);
if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
return true;
progress = rsu_prog(doorbell);
if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
if (!rsu_progress_done(progress))
return true;
return false;
@ -246,11 +372,12 @@ static inline bool rsu_start_done(u32 doorbell)
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
u32 doorbell, status;
int ret;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell_reg, progress, status;
int ret, err;
ret = regmap_update_bits(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
csr_map->base + csr_map->doorbell,
DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
DRBL_RSU_REQUEST |
FIELD_PREP(DRBL_HOST_STATUS,
@ -258,26 +385,25 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
doorbell,
rsu_start_done(doorbell),
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
ret = read_poll_timeout(m10bmc_sec_progress_status, err,
err < 0 || rsu_start_done(doorbell_reg, progress, status),
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US,
false,
sec, &doorbell_reg, &progress, &status);
if (ret == -ETIMEDOUT) {
log_error_regs(sec, doorbell);
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
} else if (err) {
return FW_UPLOAD_ERR_RW_ERROR;
}
status = rsu_stat(doorbell);
if (status == RSU_STAT_WEAROUT) {
dev_warn(sec->dev, "Excessive flash update count detected\n");
return FW_UPLOAD_ERR_WEAROUT;
} else if (status == RSU_STAT_ERASE_FAIL) {
log_error_regs(sec, doorbell);
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
@ -286,11 +412,12 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned long poll_timeout;
u32 doorbell, progress;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@ -300,7 +427,7 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
if (time_after(jiffies, poll_timeout))
break;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
@ -319,11 +446,12 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
u32 doorbell;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell_reg, status;
int ret;
ret = regmap_update_bits(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_WRITE_DONE));
@ -331,68 +459,58 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
doorbell,
rsu_prog(doorbell) != RSU_PROG_READY,
csr_map->base + csr_map->doorbell,
doorbell_reg,
rsu_prog(doorbell_reg) != RSU_PROG_READY,
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
log_error_regs(sec, doorbell);
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
}
switch (rsu_stat(doorbell)) {
case RSU_STAT_NORMAL:
case RSU_STAT_NIOS_OK:
case RSU_STAT_USER_OK:
case RSU_STAT_FACTORY_OK:
break;
default:
log_error_regs(sec, doorbell);
ret = sec->ops->rsu_status(sec);
if (ret < 0)
return ret;
status = ret;
if (!rsu_status_ok(status)) {
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
return FW_UPLOAD_ERR_NONE;
}
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
u32 progress, status;
if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
return -EIO;
switch (rsu_stat(*doorbell)) {
case RSU_STAT_NORMAL:
case RSU_STAT_NIOS_OK:
case RSU_STAT_USER_OK:
case RSU_STAT_FACTORY_OK:
break;
default:
if (!rsu_status_ok(status))
return -EINVAL;
}
switch (rsu_prog(*doorbell)) {
case RSU_PROG_IDLE:
case RSU_PROG_RSU_DONE:
if (rsu_progress_done(progress))
return 0;
case RSU_PROG_AUTHENTICATING:
case RSU_PROG_COPYING:
case RSU_PROG_UPDATE_CANCEL:
case RSU_PROG_PROGRAM_KEY_HASH:
if (rsu_progress_busy(progress))
return -EAGAIN;
default:
return -EINVAL;
}
return -EINVAL;
}
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@ -400,7 +518,7 @@ static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_BUSY;
ret = regmap_update_bits(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_ABORT_RSU));
@ -421,39 +539,50 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
if (!size || size > M10BMC_STAGING_SIZE)
return FW_UPLOAD_ERR_INVALID_SIZE;
if (sec->m10bmc->flash_bulk_ops)
if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
return FW_UPLOAD_ERR_BUSY;
ret = rsu_check_idle(sec);
if (ret != FW_UPLOAD_ERR_NONE)
return ret;
goto unlock_flash;
ret = rsu_update_init(sec);
if (ret != FW_UPLOAD_ERR_NONE)
return ret;
goto unlock_flash;
ret = rsu_prog_ready(sec);
if (ret != FW_UPLOAD_ERR_NONE)
return ret;
goto unlock_flash;
if (sec->cancel_request)
return rsu_cancel(sec);
if (sec->cancel_request) {
ret = rsu_cancel(sec);
goto unlock_flash;
}
return FW_UPLOAD_ERR_NONE;
unlock_flash:
if (sec->m10bmc->flash_bulk_ops)
sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
return ret;
}
#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
u32 offset, u32 size, u32 *written)
static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
u32 offset, u32 size, u32 *written)
{
struct m10bmc_sec *sec = fwl->dd_handle;
u32 blk_size, doorbell, extra_offset;
unsigned int stride, extra = 0;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
struct intel_m10bmc *m10bmc = sec->m10bmc;
u32 blk_size, doorbell;
int ret;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (sec->cancel_request)
return rsu_cancel(sec);
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
@ -461,28 +590,12 @@ static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data
return FW_UPLOAD_ERR_HW_ERROR;
}
WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
ret = regmap_bulk_write(sec->m10bmc->regmap,
M10BMC_STAGING_BASE + offset,
(void *)data + offset,
blk_size / stride);
ret = m10bmc_sec_write(sec, data, offset, blk_size);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
/*
* If blk_size is not aligned to stride, then handle the extra
* bytes with regmap_write.
*/
if (blk_size % stride) {
extra_offset = offset + ALIGN_DOWN(blk_size, stride);
memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
ret = regmap_write(sec->m10bmc->regmap,
M10BMC_STAGING_BASE + extra_offset, extra);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
*written = blk_size;
return FW_UPLOAD_ERR_NONE;
}
@ -539,16 +652,27 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
struct m10bmc_sec *sec = fwl->dd_handle;
(void)rsu_cancel(sec);
if (sec->m10bmc->flash_bulk_ops)
sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
static const struct fw_upload_ops m10bmc_ops = {
.prepare = m10bmc_sec_prepare,
.write = m10bmc_sec_write,
.write = m10bmc_sec_fw_write,
.poll_complete = m10bmc_sec_poll_complete,
.cancel = m10bmc_sec_cancel,
.cleanup = m10bmc_sec_cleanup,
};
static const struct m10bmc_sec_ops m10sec_n3000_ops = {
.rsu_status = m10bmc_sec_n3000_rsu_status,
};
static const struct m10bmc_sec_ops m10sec_n6000_ops = {
.rsu_status = m10bmc_sec_n6000_rsu_status,
};
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
@ -564,6 +688,7 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
sec->dev = &pdev->dev;
sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
dev_set_drvdata(&pdev->dev, sec);
ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
@ -604,9 +729,15 @@ static int m10bmc_sec_remove(struct platform_device *pdev)
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{
.name = "d5005bmc-sec-update",
.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{
.name = "n6000bmc-sec-update",
.driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
},
{ }
};

File: drivers/hwmon/Kconfig

@ -2341,7 +2341,7 @@ config SENSORS_XGENE
config SENSORS_INTEL_M10_BMC_HWMON
tristate "Intel MAX10 BMC Hardware Monitoring"
depends on MFD_INTEL_M10_BMC
depends on MFD_INTEL_M10_BMC_CORE
help
This driver provides support for the hardware monitoring functionality
on Intel MAX10 BMC chip.

File: drivers/mfd/Kconfig

@ -2224,14 +2224,32 @@ config SGI_MFD_IOC3
If you have an SGI Origin, Octane, or a PCI IOC3 card,
then say Y. Otherwise say N.
config MFD_INTEL_M10_BMC
tristate "Intel MAX 10 Board Management Controller"
depends on SPI_MASTER
select REGMAP_SPI_AVMM
select MFD_CORE
config MFD_INTEL_M10_BMC_CORE
tristate
select MFD_CORE
select REGMAP
default n
config MFD_INTEL_M10_BMC_SPI
tristate "Intel MAX 10 Board Management Controller with SPI"
depends on SPI_MASTER
select MFD_INTEL_M10_BMC_CORE
select REGMAP_SPI_AVMM
help
Support for the Intel MAX 10 board management controller using the
SPI interface.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
of the device.
config MFD_INTEL_M10_BMC_PMCI
tristate "Intel MAX 10 Board Management Controller with PMCI"
depends on FPGA_DFL
select MFD_INTEL_M10_BMC_CORE
select REGMAP
help
Support for the Intel MAX 10 board management controller using the
SPI interface.
Support for the Intel MAX 10 board management controller via PMCI.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality

File: drivers/mfd/Makefile

@ -269,7 +269,10 @@ obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
obj-$(CONFIG_MFD_SMPRO) += smpro-core.o
obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_CORE) += intel-m10-bmc-core.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_SPI) += intel-m10-bmc-spi.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o

File: drivers/mfd/intel-m10-bmc-core.c (new file)

@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Intel MAX 10 Board Management Controller chip - common code
*
* Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
static ssize_t bmc_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->build_version, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmc_version);
static ssize_t bmcfw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->fw_version, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmcfw_version);
static ssize_t mac_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int macaddr_low, macaddr_high;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_low, &macaddr_low);
if (ret)
return ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_high, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE1, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE2, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE3, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE4, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE5, macaddr_high),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE6, macaddr_high));
}
static DEVICE_ATTR_RO(mac_address);
static ssize_t mac_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int macaddr_high;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_high, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%u\n", (u8)FIELD_GET(M10BMC_N3000_MAC_COUNT, macaddr_high));
}
static DEVICE_ATTR_RO(mac_count);
static struct attribute *m10bmc_attrs[] = {
&dev_attr_bmc_version.attr,
&dev_attr_bmcfw_version.attr,
&dev_attr_mac_address.attr,
&dev_attr_mac_count.attr,
NULL,
};
static const struct attribute_group m10bmc_group = {
.attrs = m10bmc_attrs,
};
const struct attribute_group *m10bmc_dev_groups[] = {
&m10bmc_group,
NULL,
};
EXPORT_SYMBOL_GPL(m10bmc_dev_groups);
int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info)
{
int ret;
m10bmc->info = info;
dev_set_drvdata(m10bmc->dev, m10bmc);
ret = devm_mfd_add_devices(m10bmc->dev, PLATFORM_DEVID_AUTO,
info->cells, info->n_cells,
NULL, 0, NULL);
if (ret)
dev_err(m10bmc->dev, "Failed to register sub-devices: %d\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(m10bmc_dev_init);
MODULE_DESCRIPTION("Intel MAX 10 BMC core driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");

File: drivers/mfd/intel-m10-bmc-pmci.c (new file)

@ -0,0 +1,455 @@
// SPDX-License-Identifier: GPL-2.0
/*
* MAX10 BMC Platform Management Component Interface (PMCI) based
* interface.
*
* Copyright (C) 2020-2023 Intel Corporation.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dfl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/regmap.h>
struct m10bmc_pmci_device {
void __iomem *base;
struct intel_m10bmc m10bmc;
struct mutex flash_mutex; /* protects flash_busy and serializes flash read/write */
bool flash_busy;
};
/*
* Intel FPGA indirect register access via hardware controller/bridge.
*/
#define INDIRECT_CMD_OFF 0
#define INDIRECT_CMD_CLR 0
#define INDIRECT_CMD_RD BIT(0)
#define INDIRECT_CMD_WR BIT(1)
#define INDIRECT_CMD_ACK BIT(2)
#define INDIRECT_ADDR_OFF 0x4
#define INDIRECT_RD_OFF 0x8
#define INDIRECT_WR_OFF 0xc
#define INDIRECT_INT_US 1
#define INDIRECT_TIMEOUT_US 10000
struct indirect_ctx {
void __iomem *base;
struct device *dev;
};
static int indirect_clear_cmd(struct indirect_ctx *ctx)
{
unsigned int cmd;
int ret;
writel(INDIRECT_CMD_CLR, ctx->base + INDIRECT_CMD_OFF);
ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, cmd,
cmd == INDIRECT_CMD_CLR,
INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
if (ret)
dev_err(ctx->dev, "timed out waiting clear cmd (residual cmd=0x%x)\n", cmd);
return ret;
}
static int indirect_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct indirect_ctx *ctx = context;
unsigned int cmd, ack, tmpval;
int ret, ret2;
cmd = readl(ctx->base + INDIRECT_CMD_OFF);
if (cmd != INDIRECT_CMD_CLR)
dev_warn(ctx->dev, "residual cmd 0x%x on read entry\n", cmd);
writel(reg, ctx->base + INDIRECT_ADDR_OFF);
writel(INDIRECT_CMD_RD, ctx->base + INDIRECT_CMD_OFF);
ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
(ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
if (ret)
dev_err(ctx->dev, "read timed out on reg 0x%x ack 0x%x\n", reg, ack);
else
tmpval = readl(ctx->base + INDIRECT_RD_OFF);
ret2 = indirect_clear_cmd(ctx);
if (ret)
return ret;
if (ret2)
return ret2;
*val = tmpval;
return 0;
}
static int indirect_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct indirect_ctx *ctx = context;
unsigned int cmd, ack;
int ret, ret2;
cmd = readl(ctx->base + INDIRECT_CMD_OFF);
if (cmd != INDIRECT_CMD_CLR)
dev_warn(ctx->dev, "residual cmd 0x%x on write entry\n", cmd);
writel(val, ctx->base + INDIRECT_WR_OFF);
writel(reg, ctx->base + INDIRECT_ADDR_OFF);
writel(INDIRECT_CMD_WR, ctx->base + INDIRECT_CMD_OFF);
ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
(ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
if (ret)
dev_err(ctx->dev, "write timed out on reg 0x%x ack 0x%x\n", reg, ack);
ret2 = indirect_clear_cmd(ctx);
if (ret)
return ret;
return ret2;
}
static void pmci_write_fifo(void __iomem *base, const u32 *buf, size_t count)
{
while (count--)
writel(*buf++, base);
}
static void pmci_read_fifo(void __iomem *base, u32 *buf, size_t count)
{
while (count--)
*buf++ = readl(base);
}
static u32 pmci_get_write_space(struct m10bmc_pmci_device *pmci)
{
u32 val;
int ret;
ret = read_poll_timeout(readl, val,
FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE, val) ==
M10BMC_N6000_FIFO_MAX_WORDS,
M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US,
false, pmci->base + M10BMC_N6000_FLASH_CTRL);
if (ret == -ETIMEDOUT)
return 0;
return FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE, val) * M10BMC_N6000_FIFO_WORD_SIZE;
}
static int pmci_flash_bulk_write(struct intel_m10bmc *m10bmc, const u8 *buf, u32 size)
{
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
u32 blk_size, offset = 0, write_count;
while (size) {
blk_size = min(pmci_get_write_space(pmci), size);
if (blk_size == 0) {
dev_err(m10bmc->dev, "get FIFO available size fail\n");
return -EIO;
}
if (size < M10BMC_N6000_FIFO_WORD_SIZE)
break;
write_count = blk_size / M10BMC_N6000_FIFO_WORD_SIZE;
pmci_write_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO,
(u32 *)(buf + offset), write_count);
size -= blk_size;
offset += blk_size;
}
/* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
if (size) {
u32 tmp = 0;
memcpy(&tmp, buf + offset, size);
pmci_write_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO, &tmp, 1);
}
return 0;
}
static int pmci_flash_bulk_read(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size)
{
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
u32 blk_size, offset = 0, val, full_read_count, read_count;
int ret;
while (size) {
blk_size = min_t(u32, size, M10BMC_N6000_READ_BLOCK_SIZE);
full_read_count = blk_size / M10BMC_N6000_FIFO_WORD_SIZE;
read_count = full_read_count;
if (full_read_count * M10BMC_N6000_FIFO_WORD_SIZE < blk_size)
read_count++;
writel(addr + offset, pmci->base + M10BMC_N6000_FLASH_ADDR);
writel(FIELD_PREP(M10BMC_N6000_FLASH_READ_COUNT, read_count) |
M10BMC_N6000_FLASH_RD_MODE,
pmci->base + M10BMC_N6000_FLASH_CTRL);
ret = readl_poll_timeout((pmci->base + M10BMC_N6000_FLASH_CTRL), val,
!(val & M10BMC_N6000_FLASH_BUSY),
M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US);
if (ret) {
dev_err(m10bmc->dev, "read timed out on reading flash 0x%xn", val);
return ret;
}
pmci_read_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO,
(u32 *)(buf + offset), full_read_count);
size -= blk_size;
offset += blk_size;
if (full_read_count < read_count)
break;
writel(0, pmci->base + M10BMC_N6000_FLASH_CTRL);
}
/* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
if (size) {
u32 tmp;
pmci_read_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO, &tmp, 1);
memcpy(buf + offset, &tmp, size);
writel(0, pmci->base + M10BMC_N6000_FLASH_CTRL);
}
return 0;
}
static int m10bmc_pmci_set_flash_host_mux(struct intel_m10bmc *m10bmc, bool request)
{
u32 ctrl;
int ret;
ret = regmap_update_bits(m10bmc->regmap, M10BMC_N6000_FLASH_MUX_CTRL,
M10BMC_N6000_FLASH_HOST_REQUEST,
FIELD_PREP(M10BMC_N6000_FLASH_HOST_REQUEST, request));
if (ret)
return ret;
return regmap_read_poll_timeout(m10bmc->regmap,
M10BMC_N6000_FLASH_MUX_CTRL, ctrl,
request ?
(get_flash_mux(ctrl) == M10BMC_N6000_FLASH_MUX_HOST) :
(get_flash_mux(ctrl) != M10BMC_N6000_FLASH_MUX_HOST),
M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US);
}
static int m10bmc_pmci_flash_read(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size)
{
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
int ret, ret2;
mutex_lock(&pmci->flash_mutex);
if (pmci->flash_busy) {
ret = -EBUSY;
goto unlock;
}
ret = m10bmc_pmci_set_flash_host_mux(m10bmc, true);
if (ret)
goto mux_fail;
ret = pmci_flash_bulk_read(m10bmc, buf, addr, size);
mux_fail:
ret2 = m10bmc_pmci_set_flash_host_mux(m10bmc, false);
unlock:
mutex_unlock(&pmci->flash_mutex);
if (ret)
return ret;
return ret2;
}
static int m10bmc_pmci_flash_write(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size)
{
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
int ret;
mutex_lock(&pmci->flash_mutex);
WARN_ON_ONCE(!pmci->flash_busy);
/* On write, firmware manages flash MUX */
ret = pmci_flash_bulk_write(m10bmc, buf + offset, size);
mutex_unlock(&pmci->flash_mutex);
return ret;
}
static int m10bmc_pmci_flash_lock(struct intel_m10bmc *m10bmc)
{
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
int ret = 0;
mutex_lock(&pmci->flash_mutex);
if (pmci->flash_busy) {
ret = -EBUSY;
goto unlock;
}
pmci->flash_busy = true;
unlock:
mutex_unlock(&pmci->flash_mutex);
return ret;
}
static void m10bmc_pmci_flash_unlock(struct intel_m10bmc *m10bmc)
{
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
mutex_lock(&pmci->flash_mutex);
WARN_ON_ONCE(!pmci->flash_busy);
pmci->flash_busy = false;
mutex_unlock(&pmci->flash_mutex);
}
static const struct intel_m10bmc_flash_bulk_ops m10bmc_pmci_flash_bulk_ops = {
.read = m10bmc_pmci_flash_read,
.write = m10bmc_pmci_flash_write,
.lock_write = m10bmc_pmci_flash_lock,
.unlock_write = m10bmc_pmci_flash_unlock,
};
static const struct regmap_range m10bmc_pmci_regmap_range[] = {
regmap_reg_range(M10BMC_N6000_SYS_BASE, M10BMC_N6000_SYS_END),
};
static const struct regmap_access_table m10bmc_pmci_access_table = {
.yes_ranges = m10bmc_pmci_regmap_range,
.n_yes_ranges = ARRAY_SIZE(m10bmc_pmci_regmap_range),
};
static struct regmap_config m10bmc_pmci_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.wr_table = &m10bmc_pmci_access_table,
.rd_table = &m10bmc_pmci_access_table,
.reg_read = &indirect_reg_read,
.reg_write = &indirect_reg_write,
.max_register = M10BMC_N6000_SYS_END,
};
static struct mfd_cell m10bmc_pmci_n6000_bmc_subdevs[] = {
{ .name = "n6000bmc-hwmon" },
{ .name = "n6000bmc-sec-update" },
};
static const struct m10bmc_csr_map m10bmc_n6000_csr_map = {
.base = M10BMC_N6000_SYS_BASE,
.build_version = M10BMC_N6000_BUILD_VER,
.fw_version = NIOS2_N6000_FW_VERSION,
.mac_low = M10BMC_N6000_MAC_LOW,
.mac_high = M10BMC_N6000_MAC_HIGH,
.doorbell = M10BMC_N6000_DOORBELL,
.auth_result = M10BMC_N6000_AUTH_RESULT,
.bmc_prog_addr = M10BMC_N6000_BMC_PROG_ADDR,
.bmc_reh_addr = M10BMC_N6000_BMC_REH_ADDR,
.bmc_magic = M10BMC_N6000_BMC_PROG_MAGIC,
.sr_prog_addr = M10BMC_N6000_SR_PROG_ADDR,
.sr_reh_addr = M10BMC_N6000_SR_REH_ADDR,
.sr_magic = M10BMC_N6000_SR_PROG_MAGIC,
.pr_prog_addr = M10BMC_N6000_PR_PROG_ADDR,
.pr_reh_addr = M10BMC_N6000_PR_REH_ADDR,
.pr_magic = M10BMC_N6000_PR_PROG_MAGIC,
.rsu_update_counter = M10BMC_N6000_STAGING_FLASH_COUNT,
};
static const struct intel_m10bmc_platform_info m10bmc_pmci_n6000 = {
.cells = m10bmc_pmci_n6000_bmc_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_pmci_n6000_bmc_subdevs),
.csr_map = &m10bmc_n6000_csr_map,
};
static int m10bmc_pmci_probe(struct dfl_device *ddev)
{
struct device *dev = &ddev->dev;
struct m10bmc_pmci_device *pmci;
struct indirect_ctx *ctx;
int ret;
pmci = devm_kzalloc(dev, sizeof(*pmci), GFP_KERNEL);
if (!pmci)
return -ENOMEM;
pmci->m10bmc.flash_bulk_ops = &m10bmc_pmci_flash_bulk_ops;
pmci->m10bmc.dev = dev;
pmci->base = devm_ioremap_resource(dev, &ddev->mmio_res);
if (IS_ERR(pmci->base))
return PTR_ERR(pmci->base);
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_init(&pmci->flash_mutex);
ctx->base = pmci->base + M10BMC_N6000_INDIRECT_BASE;
ctx->dev = dev;
indirect_clear_cmd(ctx);
pmci->m10bmc.regmap = devm_regmap_init(dev, NULL, ctx, &m10bmc_pmci_regmap_config);
if (IS_ERR(pmci->m10bmc.regmap)) {
ret = PTR_ERR(pmci->m10bmc.regmap);
goto destroy_mutex;
}
ret = m10bmc_dev_init(&pmci->m10bmc, &m10bmc_pmci_n6000);
if (ret)
goto destroy_mutex;
return 0;
destroy_mutex:
mutex_destroy(&pmci->flash_mutex);
return ret;
}
static void m10bmc_pmci_remove(struct dfl_device *ddev)
{
struct intel_m10bmc *m10bmc = dev_get_drvdata(&ddev->dev);
struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
mutex_destroy(&pmci->flash_mutex);
}
#define FME_FEATURE_ID_M10BMC_PMCI 0x12
static const struct dfl_device_id m10bmc_pmci_ids[] = {
{ FME_ID, FME_FEATURE_ID_M10BMC_PMCI },
{ }
};
MODULE_DEVICE_TABLE(dfl, m10bmc_pmci_ids);
static struct dfl_driver m10bmc_pmci_driver = {
.drv = {
.name = "intel-m10-bmc",
.dev_groups = m10bmc_dev_groups,
},
.id_table = m10bmc_pmci_ids,
.probe = m10bmc_pmci_probe,
.remove = m10bmc_pmci_remove,
};
module_dfl_driver(m10bmc_pmci_driver);
MODULE_DESCRIPTION("MAX10 BMC PMCI-based interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");

File: drivers/mfd/intel-m10-bmc-spi.c (new file)

@ -0,0 +1,168 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Intel MAX 10 Board Management Controller chip
*
* Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/dev_printk.h>
#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
static const struct regmap_range m10bmc_regmap_range[] = {
regmap_reg_range(M10BMC_N3000_LEGACY_BUILD_VER, M10BMC_N3000_LEGACY_BUILD_VER),
regmap_reg_range(M10BMC_N3000_SYS_BASE, M10BMC_N3000_SYS_END),
regmap_reg_range(M10BMC_N3000_FLASH_BASE, M10BMC_N3000_FLASH_END),
};
static const struct regmap_access_table m10bmc_access_table = {
.yes_ranges = m10bmc_regmap_range,
.n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range),
};
static struct regmap_config intel_m10bmc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.wr_table = &m10bmc_access_table,
.rd_table = &m10bmc_access_table,
.max_register = M10BMC_N3000_MEM_END,
};
static int check_m10bmc_version(struct intel_m10bmc *ddata)
{
unsigned int v;
int ret;
/*
* This check is to filter out the very old legacy BMC versions. In the
* old BMC chips, the BMC version info is stored in the old version
* register (M10BMC_N3000_LEGACY_BUILD_VER), so its read out value would have
* not been M10BMC_N3000_VER_LEGACY_INVALID (0xffffffff). But in new BMC
* chips that the driver supports, the value of this register should be
* M10BMC_N3000_VER_LEGACY_INVALID.
*/
ret = m10bmc_raw_read(ddata, M10BMC_N3000_LEGACY_BUILD_VER, &v);
if (ret)
return -ENODEV;
if (v != M10BMC_N3000_VER_LEGACY_INVALID) {
dev_err(ddata->dev, "bad version M10BMC detected\n");
return -ENODEV;
}
return 0;
}
static int intel_m10_bmc_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct intel_m10bmc_platform_info *info;
struct device *dev = &spi->dev;
struct intel_m10bmc *ddata;
int ret;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
info = (struct intel_m10bmc_platform_info *)id->driver_data;
ddata->dev = dev;
ddata->regmap = devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
if (IS_ERR(ddata->regmap)) {
ret = PTR_ERR(ddata->regmap);
dev_err(dev, "Failed to allocate regmap: %d\n", ret);
return ret;
}
spi_set_drvdata(spi, ddata);
ret = check_m10bmc_version(ddata);
if (ret) {
dev_err(dev, "Failed to identify m10bmc hardware\n");
return ret;
}
return m10bmc_dev_init(ddata, info);
}
static const struct m10bmc_csr_map m10bmc_n3000_csr_map = {
.base = M10BMC_N3000_SYS_BASE,
.build_version = M10BMC_N3000_BUILD_VER,
.fw_version = NIOS2_N3000_FW_VERSION,
.mac_low = M10BMC_N3000_MAC_LOW,
.mac_high = M10BMC_N3000_MAC_HIGH,
.doorbell = M10BMC_N3000_DOORBELL,
.auth_result = M10BMC_N3000_AUTH_RESULT,
.bmc_prog_addr = M10BMC_N3000_BMC_PROG_ADDR,
.bmc_reh_addr = M10BMC_N3000_BMC_REH_ADDR,
.bmc_magic = M10BMC_N3000_BMC_PROG_MAGIC,
.sr_prog_addr = M10BMC_N3000_SR_PROG_ADDR,
.sr_reh_addr = M10BMC_N3000_SR_REH_ADDR,
.sr_magic = M10BMC_N3000_SR_PROG_MAGIC,
.pr_prog_addr = M10BMC_N3000_PR_PROG_ADDR,
.pr_reh_addr = M10BMC_N3000_PR_REH_ADDR,
.pr_magic = M10BMC_N3000_PR_PROG_MAGIC,
.rsu_update_counter = M10BMC_N3000_STAGING_FLASH_COUNT,
};
static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-hwmon" },
{ .name = "d5005bmc-sec-update" },
};
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-hwmon" },
{ .name = "n3000bmc-retimer" },
{ .name = "n3000bmc-sec-update" },
};
static struct mfd_cell m10bmc_n5010_subdevs[] = {
{ .name = "n5010bmc-hwmon" },
};
static const struct intel_m10bmc_platform_info m10bmc_spi_n3000 = {
.cells = m10bmc_pacn3000_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_pacn3000_subdevs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct intel_m10bmc_platform_info m10bmc_spi_d5005 = {
.cells = m10bmc_d5005_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_d5005_subdevs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct intel_m10bmc_platform_info m10bmc_spi_n5010 = {
.cells = m10bmc_n5010_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_n5010_subdevs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct spi_device_id m10bmc_spi_id[] = {
{ "m10-n3000", (kernel_ulong_t)&m10bmc_spi_n3000 },
{ "m10-d5005", (kernel_ulong_t)&m10bmc_spi_d5005 },
{ "m10-n5010", (kernel_ulong_t)&m10bmc_spi_n5010 },
{ }
};
MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
static struct spi_driver intel_m10bmc_spi_driver = {
.driver = {
.name = "intel-m10-bmc",
.dev_groups = m10bmc_dev_groups,
},
.probe = intel_m10_bmc_spi_probe,
.id_table = m10bmc_spi_id,
};
module_spi_driver(intel_m10bmc_spi_driver);
MODULE_DESCRIPTION("Intel MAX 10 BMC SPI bus interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:intel-m10-bmc");

File: drivers/mfd/intel-m10-bmc.c (deleted)

@ -1,238 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Intel MAX 10 Board Management Controller chip
*
* Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
enum m10bmc_type {
M10_N3000,
M10_D5005,
M10_N5010,
};
static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-hwmon" },
{ .name = "d5005bmc-sec-update" }
};
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-hwmon" },
{ .name = "n3000bmc-retimer" },
{ .name = "n3000bmc-sec-update" },
};
static struct mfd_cell m10bmc_n5010_subdevs[] = {
{ .name = "n5010bmc-hwmon" },
};
static const struct regmap_range m10bmc_regmap_range[] = {
regmap_reg_range(M10BMC_LEGACY_BUILD_VER, M10BMC_LEGACY_BUILD_VER),
regmap_reg_range(M10BMC_SYS_BASE, M10BMC_SYS_END),
regmap_reg_range(M10BMC_FLASH_BASE, M10BMC_FLASH_END),
};
static const struct regmap_access_table m10bmc_access_table = {
.yes_ranges = m10bmc_regmap_range,
.n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range),
};
static struct regmap_config intel_m10bmc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.wr_table = &m10bmc_access_table,
.rd_table = &m10bmc_access_table,
.max_register = M10BMC_MEM_END,
};
static ssize_t bmc_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, M10BMC_BUILD_VER, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmc_version);
static ssize_t bmcfw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, NIOS2_FW_VERSION, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmcfw_version);
static ssize_t mac_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *max10 = dev_get_drvdata(dev);
unsigned int macaddr_low, macaddr_high;
int ret;
ret = m10bmc_sys_read(max10, M10BMC_MAC_LOW, &macaddr_low);
if (ret)
return ret;
ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
(u8)FIELD_GET(M10BMC_MAC_BYTE1, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE2, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE3, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE4, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE5, macaddr_high),
(u8)FIELD_GET(M10BMC_MAC_BYTE6, macaddr_high));
}
static DEVICE_ATTR_RO(mac_address);
static ssize_t mac_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *max10 = dev_get_drvdata(dev);
unsigned int macaddr_high;
int ret;
ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%u\n",
(u8)FIELD_GET(M10BMC_MAC_COUNT, macaddr_high));
}
static DEVICE_ATTR_RO(mac_count);
static struct attribute *m10bmc_attrs[] = {
&dev_attr_bmc_version.attr,
&dev_attr_bmcfw_version.attr,
&dev_attr_mac_address.attr,
&dev_attr_mac_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(m10bmc);
static int check_m10bmc_version(struct intel_m10bmc *ddata)
{
unsigned int v;
int ret;
/*
* This check is to filter out the very old legacy BMC versions. In the
* old BMC chips, the BMC version info is stored in the old version
* register (M10BMC_LEGACY_BUILD_VER), so its read out value would have
* not been M10BMC_VER_LEGACY_INVALID (0xffffffff). But in new BMC
* chips that the driver supports, the value of this register should be
* M10BMC_VER_LEGACY_INVALID.
*/
ret = m10bmc_raw_read(ddata, M10BMC_LEGACY_BUILD_VER, &v);
if (ret)
return -ENODEV;
if (v != M10BMC_VER_LEGACY_INVALID) {
dev_err(ddata->dev, "bad version M10BMC detected\n");
return -ENODEV;
}
return 0;
}
static int intel_m10_bmc_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct device *dev = &spi->dev;
struct mfd_cell *cells;
struct intel_m10bmc *ddata;
int ret, n_cell;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
ddata->dev = dev;
ddata->regmap =
devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
if (IS_ERR(ddata->regmap)) {
ret = PTR_ERR(ddata->regmap);
dev_err(dev, "Failed to allocate regmap: %d\n", ret);
return ret;
}
spi_set_drvdata(spi, ddata);
ret = check_m10bmc_version(ddata);
if (ret) {
dev_err(dev, "Failed to identify m10bmc hardware\n");
return ret;
}
switch (id->driver_data) {
case M10_N3000:
cells = m10bmc_pacn3000_subdevs;
n_cell = ARRAY_SIZE(m10bmc_pacn3000_subdevs);
break;
case M10_D5005:
cells = m10bmc_d5005_subdevs;
n_cell = ARRAY_SIZE(m10bmc_d5005_subdevs);
break;
case M10_N5010:
cells = m10bmc_n5010_subdevs;
n_cell = ARRAY_SIZE(m10bmc_n5010_subdevs);
break;
default:
return -ENODEV;
}
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, n_cell,
NULL, 0, NULL);
if (ret)
dev_err(dev, "Failed to register sub-devices: %d\n", ret);
return ret;
}
static const struct spi_device_id m10bmc_spi_id[] = {
{ "m10-n3000", M10_N3000 },
{ "m10-d5005", M10_D5005 },
{ "m10-n5010", M10_N5010 },
{ }
};
MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
static struct spi_driver intel_m10bmc_spi_driver = {
.driver = {
.name = "intel-m10-bmc",
.dev_groups = m10bmc_groups,
},
.probe = intel_m10_bmc_spi_probe,
.id_table = m10bmc_spi_id,
};
module_spi_driver(intel_m10bmc_spi_driver);
MODULE_DESCRIPTION("Intel MAX 10 BMC Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:intel-m10-bmc");

File: include/linux/mfd/intel-m10-bmc.h

@ -7,40 +7,43 @@
#ifndef __MFD_INTEL_M10_BMC_H
#define __MFD_INTEL_M10_BMC_H
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/regmap.h>
#define M10BMC_LEGACY_BUILD_VER 0x300468
#define M10BMC_SYS_BASE 0x300800
#define M10BMC_SYS_END 0x300fff
#define M10BMC_FLASH_BASE 0x10000000
#define M10BMC_FLASH_END 0x1fffffff
#define M10BMC_MEM_END M10BMC_FLASH_END
#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
#define M10BMC_N3000_SYS_BASE 0x300800
#define M10BMC_N3000_SYS_END 0x300fff
#define M10BMC_N3000_FLASH_BASE 0x10000000
#define M10BMC_N3000_FLASH_END 0x1fffffff
#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
#define M10BMC_STAGING_BASE 0x18000000
#define M10BMC_STAGING_SIZE 0x3800000
/* Register offset of system registers */
#define NIOS2_FW_VERSION 0x0
#define M10BMC_MAC_LOW 0x10
#define M10BMC_MAC_BYTE4 GENMASK(7, 0)
#define M10BMC_MAC_BYTE3 GENMASK(15, 8)
#define M10BMC_MAC_BYTE2 GENMASK(23, 16)
#define M10BMC_MAC_BYTE1 GENMASK(31, 24)
#define M10BMC_MAC_HIGH 0x14
#define M10BMC_MAC_BYTE6 GENMASK(7, 0)
#define M10BMC_MAC_BYTE5 GENMASK(15, 8)
#define M10BMC_MAC_COUNT GENMASK(23, 16)
#define M10BMC_TEST_REG 0x3c
#define M10BMC_BUILD_VER 0x68
#define M10BMC_VER_MAJOR_MSK GENMASK(23, 16)
#define M10BMC_VER_PCB_INFO_MSK GENMASK(31, 24)
#define M10BMC_VER_LEGACY_INVALID 0xffffffff
#define NIOS2_N3000_FW_VERSION 0x0
#define M10BMC_N3000_MAC_LOW 0x10
#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
#define M10BMC_N3000_MAC_HIGH 0x14
#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
#define M10BMC_N3000_TEST_REG 0x3c
#define M10BMC_N3000_BUILD_VER 0x68
#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
/* Secure update doorbell register, in system register region */
#define M10BMC_DOORBELL 0x400
#define M10BMC_N3000_DOORBELL 0x400
/* Authorization Result register, in system register region */
#define M10BMC_AUTH_RESULT 0x404
#define M10BMC_N3000_AUTH_RESULT 0x404
/* Doorbell register fields */
#define DRBL_RSU_REQUEST BIT(0)
@ -88,7 +91,6 @@
#define HOST_STATUS_ABORT_RSU 0x2
#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
#define rsu_stat(doorbell) FIELD_GET(DRBL_RSU_STATUS, doorbell)
/* interval 100ms and timeout 5s */
#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
@ -103,29 +105,145 @@
#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
/* Addresses for security related data in FLASH */
#define BMC_REH_ADDR 0x17ffc004
#define BMC_PROG_ADDR 0x17ffc000
#define BMC_PROG_MAGIC 0x5746
#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
#define SR_REH_ADDR 0x17ffd004
#define SR_PROG_ADDR 0x17ffd000
#define SR_PROG_MAGIC 0x5253
#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
#define PR_REH_ADDR 0x17ffe004
#define PR_PROG_ADDR 0x17ffe000
#define PR_PROG_MAGIC 0x5250
#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
/* Address of 4KB inverted bit vector containing staging area FLASH count */
#define STAGING_FLASH_COUNT 0x17ffb000
#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
#define M10BMC_N6000_INDIRECT_BASE 0x400
#define M10BMC_N6000_SYS_BASE 0x0
#define M10BMC_N6000_SYS_END 0xfff
#define M10BMC_N6000_DOORBELL 0x1c0
#define M10BMC_N6000_AUTH_RESULT 0x1c4
#define AUTH_RESULT_RSU_STATUS GENMASK(23, 16)
#define M10BMC_N6000_BUILD_VER 0x0
#define NIOS2_N6000_FW_VERSION 0x4
#define M10BMC_N6000_MAC_LOW 0x20
#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
/* Addresses for security related data in FLASH */
#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
#define M10BMC_N6000_FLASH_MUX_CTRL 0x1d0
#define M10BMC_N6000_FLASH_MUX_SELECTION GENMASK(2, 0)
#define M10BMC_N6000_FLASH_MUX_IDLE 0
#define M10BMC_N6000_FLASH_MUX_NIOS 1
#define M10BMC_N6000_FLASH_MUX_HOST 2
#define M10BMC_N6000_FLASH_MUX_PFL 4
#define get_flash_mux(mux) FIELD_GET(M10BMC_N6000_FLASH_MUX_SELECTION, mux)
#define M10BMC_N6000_FLASH_NIOS_REQUEST BIT(4)
#define M10BMC_N6000_FLASH_HOST_REQUEST BIT(5)
#define M10BMC_N6000_FLASH_CTRL 0x40
#define M10BMC_N6000_FLASH_WR_MODE BIT(0)
#define M10BMC_N6000_FLASH_RD_MODE BIT(1)
#define M10BMC_N6000_FLASH_BUSY BIT(2)
#define M10BMC_N6000_FLASH_FIFO_SPACE GENMASK(13, 4)
#define M10BMC_N6000_FLASH_READ_COUNT GENMASK(25, 16)
#define M10BMC_N6000_FLASH_ADDR 0x44
#define M10BMC_N6000_FLASH_FIFO 0x800
#define M10BMC_N6000_READ_BLOCK_SIZE 0x800
#define M10BMC_N6000_FIFO_MAX_BYTES 0x800
#define M10BMC_N6000_FIFO_WORD_SIZE 4
#define M10BMC_N6000_FIFO_MAX_WORDS (M10BMC_N6000_FIFO_MAX_BYTES / \
M10BMC_N6000_FIFO_WORD_SIZE)
#define M10BMC_FLASH_INT_US 1
#define M10BMC_FLASH_TIMEOUT_US 10000
/**
* struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
*/
struct m10bmc_csr_map {
unsigned int base;
unsigned int build_version;
unsigned int fw_version;
unsigned int mac_low;
unsigned int mac_high;
unsigned int doorbell;
unsigned int auth_result;
unsigned int bmc_prog_addr;
unsigned int bmc_reh_addr;
unsigned int bmc_magic;
unsigned int sr_prog_addr;
unsigned int sr_reh_addr;
unsigned int sr_magic;
unsigned int pr_prog_addr;
unsigned int pr_reh_addr;
unsigned int pr_magic;
unsigned int rsu_update_counter;
};
/**
* struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
* @cells: MFD cells
* @n_cells: MFD cells ARRAY_SIZE()
* @csr_map: the mappings for register definition of MAX10 BMC
*/
struct intel_m10bmc_platform_info {
struct mfd_cell *cells;
int n_cells;
const struct m10bmc_csr_map *csr_map;
};
struct intel_m10bmc;
/**
* struct intel_m10bmc_flash_bulk_ops - device specific operations for flash R/W
* @read: read a block of data from flash
* @write: write a block of data to flash
* @lock_write: locks flash access for erase+write
* @unlock_write: unlock flash access
*
* Write must be protected with @lock_write and @unlock_write. While the flash
* is locked, @read returns -EBUSY.
*/
struct intel_m10bmc_flash_bulk_ops {
int (*read)(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size);
int (*write)(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size);
int (*lock_write)(struct intel_m10bmc *m10bmc);
void (*unlock_write)(struct intel_m10bmc *m10bmc);
};
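
For reference, a consumer is expected to bracket flash writes with the lock, as the sec-update driver above does in its prepare/cleanup callbacks. A minimal sketch under that contract (the helper name is made up and error handling is abbreviated):

static int example_flash_write(struct intel_m10bmc *m10bmc, const u8 *img, u32 size)
{
	int ret;

	if (!m10bmc->flash_bulk_ops)
		return -EOPNOTSUPP;	/* device without bulk flash ops */

	/* Take the write lock; concurrent read() callers will see -EBUSY */
	ret = m10bmc->flash_bulk_ops->lock_write(m10bmc);
	if (ret)
		return ret;

	ret = m10bmc->flash_bulk_ops->write(m10bmc, img, 0, size);

	m10bmc->flash_bulk_ops->unlock_write(m10bmc);
	return ret;
}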
/**
* struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
* @dev: this device
* @regmap: the regmap used to access registers by m10bmc itself
* @info: the platform information for MAX10 BMC
* @flash_bulk_ops: optional device specific operations for flash R/W
*/
struct intel_m10bmc {
struct device *dev;
struct regmap *regmap;
const struct intel_m10bmc_platform_info *info;
const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
};
/*
@ -152,11 +270,22 @@ m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
* The base of the system registers could be configured by HW developers, and
* in HW SPEC, the base is not added to the addresses of the system registers.
*
* This macro helps to simplify the accessing of the system registers. And if
* This function helps to simplify the accessing of the system registers. And if
* the base is reconfigured in HW, SW developers could simply change the
* M10BMC_SYS_BASE accordingly.
* csr_map's base accordingly.
*/
#define m10bmc_sys_read(m10bmc, offset, val) \
m10bmc_raw_read(m10bmc, M10BMC_SYS_BASE + (offset), val)
static inline int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset,
unsigned int *val)
{
const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
}
/*
* MAX10 BMC Core support
*/
int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
extern const struct attribute_group *m10bmc_dev_groups[];
#endif /* __MFD_INTEL_M10_BMC_H */