// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/memregion.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Map the device's GET_SECURITY_STATE output onto the generic NVDIMM
 * security flags consumed by the libnvdimm core.
 */
static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
						 enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long security_flags = 0;
	u32 sec_out;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0,
			       &sec_out, sizeof(sec_out));
	if (rc < 0)
		return 0;

	if (ptype == NVDIMM_MASTER) {
		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		if (sec_out & CXL_PMEM_SEC_STATE_FROZEN ||
		    sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (sec_out & CXL_PMEM_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else {
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
	}

	return security_flags;
}

static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
					const struct nvdimm_key_data *old_data,
					const struct nvdimm_key_data *new_data,
					enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_set_pass set_pass;
	int rc;

	set_pass.type = ptype == NVDIMM_MASTER ?
			CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN);
	memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE,
			       &set_pass, sizeof(set_pass), NULL, 0);
	return rc;
}

static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
				     const struct nvdimm_key_data *key_data)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_disable_pass dis_pass;
	int rc;

	dis_pass.type = CXL_PMEM_SEC_PASS_USER;
	memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE,
			       &dis_pass, sizeof(dis_pass), NULL, 0);
	return rc;
}

static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY,
				 NULL, 0, NULL, 0);
}

static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
				    const struct nvdimm_key_data *key_data)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u8 pass[NVDIMM_PASSPHRASE_LEN];
	int rc;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK,
			       pass, NVDIMM_PASSPHRASE_LEN, NULL, 0);
	if (rc < 0)
		return rc;

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	return 0;
}

static int
cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
				   const struct nvdimm_key_data *key,
				   enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_pass_erase erase;
	int rc;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	erase.type = ptype == NVDIMM_MASTER ?
		     CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN);

	/* Flush all cache before we erase mem device */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
			       &erase, sizeof(erase), NULL, 0);
	if (rc < 0)
		return rc;

	/* mem device erased, invalidate all CPU caches before data is read */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	return 0;
}

static const struct nvdimm_security_ops __cxl_security_ops = {
	.get_flags = cxl_pmem_get_security_flags,
	.change_key = cxl_pmem_security_change_key,
	.disable = cxl_pmem_security_disable,
	.freeze = cxl_pmem_security_freeze,
	.unlock = cxl_pmem_security_unlock,
	.erase = cxl_pmem_security_passphrase_erase,
};

const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops;

MODULE_IMPORT_NS(DEVMEM);