Merge branch 'nvme-4.13' of git://git.infradead.org/nvme into for-4.13/block

Pull NVMe changes for 4.13 from Christoph:

Highlights:

 - UUID identifier support from Johannes
 - Lots of cleanups from Sagi
 - Host Memory Buffer support from me

And lots of cleanups and smaller fixes of course.

Note that the UUID identifier changes are based on top of the uuid tree.
I am the maintainer of that tree and will send it to Linus as soon as
4.12 is released, as various other trees depend on it as well (the
diffstat unfortunately includes those changes).
Jens Axboe 2017-06-16 10:14:59 -06:00
commit c27b2d634f
89 changed files with 1176 additions and 936 deletions
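The bulk of the non-NVMe churn below is mechanical conversion from open-coded 16-byte arrays and the old uuid_be/uuid_le types to the shared uuid_t/guid_t types from <linux/uuid.h> that the uuid tree introduces. As a hedged sketch of the API surface those conversions rely on (the GUID constant is borrowed from the i2c-hid hunk further down; the function itself is illustrative):

#include <linux/uuid.h>

/* uuid_t keeps RFC 4122 big-endian byte order; guid_t is the
 * little-endian "GUID" layout used by ACPI, EFI and friends.
 */
static const guid_t example_guid =
	GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
		  0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);

static int example(void)
{
	uuid_t u;
	guid_t g;

	/* Both parsers take the canonical 36-character textual form
	 * and return 0 on success or -EINVAL on a malformed string.
	 */
	if (uuid_parse("f81d4fae-7dec-11d0-a765-00a0c91e6bf6", &u))
		return -EINVAL;
	if (guid_parse("f81d4fae-7dec-11d0-a765-00a0c91e6bf6", &g))
		return -EINVAL;

	if (uuid_is_null(&u))			/* all-zero check */
		return -ENODEV;
	return guid_equal(&g, &example_guid);	/* byte-wise compare */
}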

MAINTAINERS

@ -13462,6 +13462,17 @@ W: http://en.wikipedia.org/wiki/Util-linux
T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
S: Maintained
UUID HELPERS
M: Christoph Hellwig <hch@lst.de>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
L: linux-kernel@vger.kernel.org
T: git git://git.infradead.org/users/hch/uuid.git
F: lib/uuid.c
F: lib/test_uuid.c
F: include/linux/uuid.h
F: include/uapi/linux/uuid.h
S: Maintained
UVESAFB DRIVER
M: Michal Januszewski <spock@gentoo.org>
L: linux-fbdev@vger.kernel.org

arch/s390/include/asm/sysinfo.h

@ -109,7 +109,7 @@ struct sysinfo_2_2_2 {
unsigned short cpus_shared;
char reserved_4[3];
unsigned char vsne;
uuid_be uuid;
uuid_t uuid;
char reserved_5[160];
char ext_name[256];
};
@ -134,7 +134,7 @@ struct sysinfo_3_2_2 {
char reserved_1[3];
unsigned char evmne;
unsigned int reserved_2;
uuid_be uuid;
uuid_t uuid;
} vm[8];
char reserved_3[1504];
char ext_names[8][256];

arch/s390/kernel/sysinfo.c

@ -242,7 +242,7 @@ static void print_ext_name(struct seq_file *m, int lvl,
static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
{
if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
if (uuid_is_null(&info->vm[i].uuid))
return;
seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid);
}
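The s390 hunk above swaps a memcmp() against NULL_UUID_BE for uuid_is_null(). The helper is not magic; roughly, it reduces to the following (a sketch of the equivalent check, with guid_is_null() as the guid_t counterpart used later in the nvdimm hunk):

#include <linux/uuid.h>

/* Sketch: compare against the exported all-zero constant uuid_null. */
static bool example_uuid_is_null(const uuid_t *uuid)
{
	return uuid_equal(uuid, &uuid_null);
}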

block/partitions/ldm.c

@ -115,7 +115,7 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
ldm_error("PRIVHEAD disk size doesn't match real disk size");
return false;
}
if (uuid_be_to_bin(data + 0x0030, (uuid_be *)ph->disk_id)) {
if (uuid_parse(data + 0x0030, &ph->disk_id)) {
ldm_error("PRIVHEAD contains an invalid GUID.");
return false;
}
@ -234,7 +234,7 @@ static bool ldm_compare_privheads (const struct privhead *ph1,
(ph1->logical_disk_size == ph2->logical_disk_size) &&
(ph1->config_start == ph2->config_start) &&
(ph1->config_size == ph2->config_size) &&
!memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
uuid_equal(&ph1->disk_id, &ph2->disk_id));
}
/**
@ -557,7 +557,7 @@ static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
list_for_each (item, &ldb->v_disk) {
struct vblk *v = list_entry (item, struct vblk, list);
if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
if (uuid_equal(&v->vblk.disk.disk_id, &ldb->ph.disk_id))
return v;
}
@ -892,7 +892,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
disk = &vb->vblk.disk;
ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
sizeof (disk->alt_name));
if (uuid_be_to_bin(buffer + 0x19 + r_name, (uuid_be *)disk->disk_id))
if (uuid_parse(buffer + 0x19 + r_name, &disk->disk_id))
return false;
return true;
@ -927,7 +927,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
return false;
disk = &vb->vblk.disk;
memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
uuid_copy(&disk->disk_id, (uuid_t *)(buffer + 0x18 + r_name));
return true;
}
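uuid_parse() looks at exactly the canonical 36-character "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" form, which is what the LDM PRIVHEAD stores at the offsets above, so disk_id can become a real uuid_t and the memcmp() calls become uuid_equal(). A sketch of the resulting pattern (example_match is a hypothetical helper, not part of the patch):

#include <linux/uuid.h>

/* Sketch: parse a textual GUID out of an on-disk buffer and compare
 * it against a previously parsed identifier.
 */
static bool example_match(const u8 *data, const uuid_t *disk_id)
{
	uuid_t id;

	if (uuid_parse((const char *)data, &id))
		return false;	/* malformed textual UUID */
	return uuid_equal(&id, disk_id);
}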

block/partitions/ldm.h

@ -112,8 +112,6 @@ struct frag { /* VBLK Fragment handling */
/* In memory LDM database structures. */
#define GUID_SIZE 16
struct privhead { /* Offsets and sizes are in sectors. */
u16 ver_major;
u16 ver_minor;
@ -121,7 +119,7 @@ struct privhead { /* Offsets and sizes are in sectors. */
u64 logical_disk_size;
u64 config_start;
u64 config_size;
u8 disk_id[GUID_SIZE];
uuid_t disk_id;
};
struct tocblock { /* We have exactly two bitmaps. */
@ -154,7 +152,7 @@ struct vblk_dgrp { /* VBLK Disk Group */
};
struct vblk_disk { /* VBLK Disk */
u8 disk_id[GUID_SIZE];
uuid_t disk_id;
u8 alt_name[128];
};

drivers/acpi/acpi_extlog.c

@ -141,9 +141,9 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
const uuid_le *fru_id = &NULL_UUID_LE;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
uuid_le *sec_type;
guid_t *sec_type;
static u32 err_seq;
estatus = extlog_elog_entry_check(cpu, bank);
@ -165,11 +165,11 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
err_seq++;
gdata = (struct acpi_hest_generic_data *)(tmp + 1);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (uuid_le *)gdata->fru_id;
fru_id = (guid_t *)gdata->fru_id;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
sec_type = (uuid_le *)gdata->section_type;
if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
@ -182,17 +182,17 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
static bool __init extlog_get_l1addr(void)
{
u8 uuid[16];
guid_t guid;
acpi_handle handle;
union acpi_object *obj;
acpi_str_to_uuid(extlog_dsm_uuid, uuid);
if (guid_parse(extlog_dsm_uuid, &guid))
return false;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return false;
if (!acpi_check_dsm(handle, uuid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
if (!acpi_check_dsm(handle, &guid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
return false;
obj = acpi_evaluate_dsm_typed(handle, uuid, EXTLOG_DSM_REV,
obj = acpi_evaluate_dsm_typed(handle, &guid, EXTLOG_DSM_REV,
EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
if (!obj) {
return false;

drivers/acpi/apei/ghes.c

@ -431,12 +431,13 @@ static void ghes_do_proc(struct ghes *ghes,
{
int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
guid_t *sec_type;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
sec_type = (guid_t *)gdata->section_type;
sec_sev = ghes_severity(gdata->error_severity);
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err;
mem_err = (struct cper_sec_mem_err *)(gdata+1);
ghes_edac_report_mem_error(ghes, sev, mem_err);
@ -445,8 +446,7 @@ static void ghes_do_proc(struct ghes *ghes,
ghes_handle_memory_failure(gdata, sev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PCIE)) {
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie_err;
pcie_err = (struct cper_sec_pcie *)(gdata+1);
if (sev == GHES_SEV_RECOVERABLE &&

drivers/acpi/bus.c

@ -196,42 +196,19 @@ static void acpi_print_osc_error(acpi_handle handle,
pr_debug("\n");
}
acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
{
int i;
static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
24, 26, 28, 30, 32, 34};
if (strlen(str) != 36)
return AE_BAD_PARAMETER;
for (i = 0; i < 36; i++) {
if (i == 8 || i == 13 || i == 18 || i == 23) {
if (str[i] != '-')
return AE_BAD_PARAMETER;
} else if (!isxdigit(str[i]))
return AE_BAD_PARAMETER;
}
for (i = 0; i < 16; i++) {
uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
}
return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_str_to_uuid);
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
{
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[4];
union acpi_object *out_obj;
u8 uuid[16];
guid_t guid;
u32 errors;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
if (!context)
return AE_ERROR;
if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
if (guid_parse(context->uuid_str, &guid))
return AE_ERROR;
context->ret.length = ACPI_ALLOCATE_BUFFER;
context->ret.pointer = NULL;
@ -241,7 +218,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = uuid;
in_params[0].buffer.pointer = (u8 *)&guid;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = context->rev;
in_params[2].type = ACPI_TYPE_INTEGER;

drivers/acpi/nfit/core.c

@ -74,11 +74,11 @@ struct nfit_table_prev {
struct list_head flushes;
};
static u8 nfit_uuid[NFIT_UUID_MAX][16];
static guid_t nfit_uuid[NFIT_UUID_MAX];
const u8 *to_nfit_uuid(enum nfit_uuids id)
const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
return nfit_uuid[id];
return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);
@ -222,7 +222,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
u32 offset, fw_status = 0;
acpi_handle handle;
unsigned int func;
const u8 *uuid;
const guid_t *guid;
int rc, i;
func = cmd;
@ -245,7 +245,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nvdimm_cmd_mask(nvdimm);
dsm_mask = nfit_mem->dsm_mask;
desc = nd_cmd_dimm_desc(cmd);
uuid = to_nfit_uuid(nfit_mem->family);
guid = to_nfit_uuid(nfit_mem->family);
handle = adev->handle;
} else {
struct acpi_device *adev = to_acpi_dev(acpi_desc);
@ -254,7 +254,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
dsm_mask = cmd_mask;
desc = nd_cmd_bus_desc(cmd);
uuid = to_nfit_uuid(NFIT_DEV_BUS);
guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
dimm_name = "bus";
}
@ -289,7 +289,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
in_buf.buffer.pointer,
min_t(u32, 256, in_buf.buffer.length), true);
out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
if (!out_obj) {
dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
cmd_name);
@ -409,7 +409,7 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
int i;
for (i = 0; i < NFIT_UUID_MAX; i++)
if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
return i;
return -1;
}
@ -1415,7 +1415,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
unsigned long dsm_mask;
const u8 *uuid;
const guid_t *guid;
int i;
int family = -1;
@ -1444,7 +1444,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
/*
* Until standardization materializes we need to consider 4
* different command sets. Note that checking for function0 (bit0)
* tells us if any commands are reachable through this uuid.
* tells us if any commands are reachable through this GUID.
*/
for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@ -1474,9 +1474,9 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return 0;
}
uuid = to_nfit_uuid(nfit_mem->family);
guid = to_nfit_uuid(nfit_mem->family);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
set_bit(i, &nfit_mem->dsm_mask);
return 0;
@ -1611,7 +1611,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
struct acpi_device *adev;
int i;
@ -1621,7 +1621,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
return;
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
set_bit(i, &nd_desc->cmd_mask);
}
@ -3051,19 +3051,19 @@ static __init int nfit_init(void)
BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)

drivers/acpi/nfit/nfit.h

@ -18,7 +18,6 @@
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <acpi/acuuid.h>
@ -237,7 +236,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
const u8 *to_nfit_uuid(enum nfit_uuids id);
const guid_t *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
void acpi_nfit_shutdown(void *data);
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);

drivers/acpi/utils.c

@ -613,19 +613,19 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
/**
* acpi_evaluate_dsm - evaluate device's _DSM method
* @handle: ACPI device handle
* @uuid: UUID of requested functions, should be 16 bytes
* @guid: GUID of requested functions, should be 16 bytes
* @rev: revision number of requested function
* @func: requested function number
* @argv4: the function specific parameter
*
* Evaluate device's _DSM method with specified UUID, revision id and
* Evaluate device's _DSM method with specified GUID, revision id and
* function number. Caller needs to free the returned object.
*
* Though ACPI defines that the fourth parameter for _DSM should be a package,
* some old BIOSes do expect a buffer or an integer etc.
*/
union acpi_object *
acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
union acpi_object *argv4)
{
acpi_status ret;
@ -638,7 +638,7 @@ acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
params[0].type = ACPI_TYPE_BUFFER;
params[0].buffer.length = 16;
params[0].buffer.pointer = (char *)uuid;
params[0].buffer.pointer = (u8 *)guid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = rev;
params[2].type = ACPI_TYPE_INTEGER;
@ -666,7 +666,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
/**
* acpi_check_dsm - check if _DSM method supports requested functions.
* @handle: ACPI device handle
* @uuid: UUID of requested functions, should be 16 bytes at least
* @guid: GUID of requested functions, should be 16 bytes at least
* @rev: revision number of requested functions
* @funcs: bitmap of requested functions
*
@ -674,7 +674,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
* functions. Currently at most 64 functions are supported, which should be
* enough for now.
*/
bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
{
int i;
u64 mask = 0;
@ -683,7 +683,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
if (funcs == 0)
return false;
obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
obj = acpi_evaluate_dsm(handle, guid, rev, 0, NULL);
if (!obj)
return false;
@ -697,7 +697,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
/*
* Bit 0 indicates whether there's support for any functions other than
* function 0 for the specified UUID and revision.
* function 0 for the specified GUID and revision.
*/
if ((mask & 0x1) && (mask & funcs) == funcs)
return true;
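With the new signatures, a _DSM consumer declares its identifier once as a guid_t and passes its address around. A condensed sketch of the calling convention (GUID value, revision, and function number are invented for illustration):

#include <linux/acpi.h>
#include <linux/uuid.h>

static const guid_t example_dsm_guid =	/* hypothetical identifier */
	GUID_INIT(0x12345678, 0x9abc, 0xdef0,
		  0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);

static int example_dsm_query(acpi_handle handle)
{
	union acpi_object *obj;

	/* Probe the function-0 support bitmask first... */
	if (!acpi_check_dsm(handle, &example_dsm_guid, 1, 1 << 1))
		return -ENODEV;

	/* ...then evaluate function 1 itself. */
	obj = acpi_evaluate_dsm(handle, &example_dsm_guid, 1, 1, NULL);
	if (!obj)
		return -EIO;
	ACPI_FREE(obj);		/* caller frees the returned object */
	return 0;
}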

drivers/char/tpm/tpm_crb.c

@ -27,10 +27,9 @@
#define ACPI_SIG_TPM2 "TPM2"
static const u8 CRB_ACPI_START_UUID[] = {
/* 0000 */ 0xAB, 0x6C, 0xBF, 0x6B, 0x63, 0x54, 0x14, 0x47,
/* 0008 */ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4
};
static const guid_t crb_acpi_start_guid =
GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
enum crb_defaults {
CRB_ACPI_START_REVISION_ID = 1,
@ -266,7 +265,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
int rc;
obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
CRB_ACPI_START_UUID,
&crb_acpi_start_guid,
CRB_ACPI_START_REVISION_ID,
CRB_ACPI_START_INDEX,
NULL);
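The byte-array-to-GUID_INIT rewrites all follow one fixed rule: the first three fields are written in their natural (textual) order and stored little-endian, while the trailing eight bytes are copied verbatim. Restating the CRB start GUID from the hunk above through that rule:

/* Old memory-order byte array:
 *   AB 6C BF 6B  63 54  14 47  B7 CD F0 20 3C 03 68 D4
 * maps onto GUID_INIT as:
 *   0x6BBF6CAB  -> stored little-endian as AB 6C BF 6B
 *   0x5463      -> stored little-endian as 63 54
 *   0x4714      -> stored little-endian as 14 47
 *   last eight bytes unchanged
 */
static const guid_t crb_acpi_start_guid_restated =
	GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
		  0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);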

drivers/char/tpm/tpm_ppi.c

@ -32,20 +32,16 @@
#define PPI_VS_REQ_START 128
#define PPI_VS_REQ_END 255
static const u8 tpm_ppi_uuid[] = {
0xA6, 0xFA, 0xDD, 0x3D,
0x1B, 0x36,
0xB4, 0x4E,
0xA4, 0x24,
0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
};
static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
static inline union acpi_object *
tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
union acpi_object *argv4)
{
BUG_ON(!ppi_handle);
return acpi_evaluate_dsm_typed(ppi_handle, tpm_ppi_uuid,
return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
TPM_PPI_REVISION_ID,
func, argv4, type);
}
@ -107,7 +103,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
* is updated with function index from SUBREQ to SUBREQ2 since PPI
* version 1.1
*/
if (acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
func = TPM_PPI_FN_SUBREQ2;
@ -268,7 +264,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
"User not required",
};
if (!acpi_check_dsm(dev_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
@ -341,12 +337,12 @@ void tpm_add_ppi(struct tpm_chip *chip)
if (!chip->acpi_dev_handle)
return;
if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
return;
/* Cache PPI version string. */
obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid,
obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
NULL, ACPI_TYPE_STRING);
if (obj) {

drivers/gpu/drm/i915/intel_acpi.c

@ -15,13 +15,9 @@ static struct intel_dsm_priv {
acpi_handle dhandle;
} intel_dsm_priv;
static const u8 intel_dsm_guid[] = {
0xd3, 0x73, 0xd8, 0x7e,
0xd0, 0xc2,
0x4f, 0x4e,
0xa8, 0x54,
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
static const guid_t intel_dsm_guid =
GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
static char *intel_dsm_port_name(u8 id)
{
@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void)
int i;
union acpi_object *pkg, *connector_count;
pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
NULL, ACPI_TYPE_PACKAGE);
if (!pkg) {
@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;

drivers/gpu/drm/nouveau/nouveau_acpi.c

@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) {
}
#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
};
static const guid_t nouveau_dsm_muid =
GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
static const char nouveau_op_dsm_muid[] = {
0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
};
static const guid_t nouveau_op_dsm_muid =
GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
args_buff[i] = (arg >> i * 8) & 0xFF;
*result = 0;
obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,
func, &argv4, ACPI_TYPE_BUFFER);
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg)
.integer.value = arg,
};
obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,
func, &argv4, ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
if (!acpi_has_method(dhandle, "_DSM"))
return;
supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,
1 << NOUVEAU_DSM_POWER);
optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);

drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c

@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
{
struct nvkm_subdev *subdev = &mxm->subdev;
struct nvkm_device *device = subdev->device;
static char muid[] = {
0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
};
static guid_t muid =
GUID_INIT(0x4004A400, 0x917D, 0x4CF2,
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);
u32 mxms_args[] = { 0x00000000 };
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
* unless you pass in exactly the version it supports.
*/
rev = (version & 0xf0) << 4 | (version & 0x0f);
obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);
if (!obj) {
nvkm_debug(subdev, "DSM MXMS failed\n");
return false;

drivers/hid/i2c-hid/i2c-hid.c

@ -872,10 +872,9 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
static int i2c_hid_acpi_pdata(struct i2c_client *client,
struct i2c_hid_platform_data *pdata)
{
static u8 i2c_hid_guid[] = {
0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
};
static guid_t i2c_hid_guid =
GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
@ -884,7 +883,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
if (!handle || acpi_bus_get_device(handle, &adev))
return -ENODEV;
obj = acpi_evaluate_dsm_typed(handle, i2c_hid_guid, 1, 1, NULL,
obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
dev_err(&client->dev, "device _DSM execution failed\n");

drivers/iommu/dmar.c

@ -1808,10 +1808,9 @@ IOMMU_INIT_POST(detect_intel_iommu);
* for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
* "Remapping Hardware Unit Hot Plug".
*/
static u8 dmar_hp_uuid[] = {
/* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
/* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};
static guid_t dmar_hp_guid =
GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
/*
* Currently there's only one revision and BIOS will not check the revision id,
@ -1824,7 +1823,7 @@ static u8 dmar_hp_uuid[] = {
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
@ -1843,7 +1842,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
if (!dmar_detect_dsm(handle, func))
return 0;
obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
func, NULL, ACPI_TYPE_BUFFER);
if (!obj)
return -ENODEV;

drivers/md/md.c

@ -825,7 +825,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
@ -833,7 +833,7 @@ static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
mdp_super_t *tmp1, *tmp2;
@ -1025,12 +1025,12 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
if (!md_uuid_equal(refsb, sb)) {
pr_warn("md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
if (!md_sb_equal(refsb, sb)) {
pr_warn("md: %s has same UUID but different superblock to %s\n",
b, bdevname(refdev->bdev, b2));
goto abort;

drivers/mmc/host/sdhci-pci-core.c

@ -404,10 +404,9 @@ struct intel_host {
bool d3_retune;
};
const u8 intel_dsm_uuid[] = {
0xA5, 0x3E, 0xC1, 0xF6, 0xCD, 0x65, 0x1F, 0x46,
0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61,
};
const guid_t intel_dsm_guid =
GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
@ -416,7 +415,7 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
int err = 0;
size_t len;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), intel_dsm_uuid, 0, fn, NULL);
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
if (!obj)
return -EOPNOTSUPP;

drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c

@ -29,10 +29,9 @@ enum _dsm_rst_type {
HNS_ROCE_RESET_FUNC = 0x7,
};
const u8 hns_dsaf_acpi_dsm_uuid[] = {
0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
};
const guid_t hns_dsaf_acpi_dsm_guid =
GUID_INIT(0x1A85AA1A, 0xE293, 0x415E,
0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A);
static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
{
@ -151,7 +150,7 @@ static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
argv4.package.elements = obj_args;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
&hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
if (!obj) {
dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
port_type, port);
@ -434,7 +433,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
argv4.package.elements = &obj_args,
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
hns_dsaf_acpi_dsm_uuid, 0,
&hns_dsaf_acpi_dsm_guid, 0,
HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
if (!obj || obj->type != ACPI_TYPE_INTEGER)
@ -474,7 +473,7 @@ int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
argv4.package.elements = &obj_args,
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
hns_dsaf_acpi_dsm_uuid, 0,
&hns_dsaf_acpi_dsm_guid, 0,
HNS_OP_GET_SFP_STAT_FUNC, &argv4);
if (!obj || obj->type != ACPI_TYPE_INTEGER)
@ -565,7 +564,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
argv4.package.elements = obj_args;
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
hns_dsaf_acpi_dsm_uuid, 0,
&hns_dsaf_acpi_dsm_guid, 0,
HNS_OP_SERDES_LP_FUNC, &argv4);
if (!obj) {
dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",

drivers/nvdimm/btt_devs.c

@ -222,13 +222,6 @@ struct device *nd_btt_create(struct nd_region *nd_region)
return dev;
}
static bool uuid_is_null(u8 *uuid)
{
static const u8 null_uuid[16];
return (memcmp(uuid, null_uuid, 16) == 0);
}
/**
* nd_btt_arena_is_valid - check if the metadata layout is valid
* @nd_btt: device with BTT geometry and backing device info
@ -249,7 +242,7 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
return false;
if (!uuid_is_null(super->parent_uuid))
if (!guid_is_null((guid_t *)&super->parent_uuid))
if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
return false;

drivers/nvme/host/core.c

@ -45,7 +45,7 @@ module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);
unsigned char shutdown_timeout = 5;
static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@ -65,11 +65,34 @@ static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
if (!queue_work(nvme_wq, &ctrl->reset_work))
return -EBUSY;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
ret = nvme_reset_ctrl(ctrl);
if (!ret)
flush_work(&ctrl->reset_work);
return ret;
}
static blk_status_t nvme_error_status(struct request *req)
{
switch (nvme_req(req)->status & 0x7ff) {
@ -157,7 +180,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@ -323,12 +345,22 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd)
{
u16 control = 0;
u32 dsmgmt = 0;
/*
* If formatted with metadata, require the block layer provide a buffer
* unless this namespace is formatted such that the metadata can be
* stripped/generated by the controller with PRACT=1.
*/
if (ns && ns->ms &&
(!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
!blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
return BLK_STS_NOTSUPP;
if (req->cmd_flags & REQ_FUA)
control |= NVME_RW_FUA;
if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@ -362,6 +394,7 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
return 0;
}
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
@ -390,7 +423,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
break;
case REQ_OP_READ:
case REQ_OP_WRITE:
nvme_setup_rw(ns, req, cmd);
ret = nvme_setup_rw(ns, req, cmd);
break;
default:
WARN_ON_ONCE(1);
@ -592,7 +625,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
if (nvme_keep_alive(ctrl)) {
/* allocation failure, reset the controller */
dev_err(ctrl->device, "keep-alive failed\n");
ctrl->ops->reset_ctrl(ctrl);
nvme_reset_ctrl(ctrl);
return;
}
}
@ -636,6 +669,77 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
{
struct nvme_command c = { };
int status;
void *data;
int pos;
int len;
c.identify.opcode = nvme_admin_identify;
c.identify.nsid = cpu_to_le32(nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
NVME_IDENTIFY_DATA_SIZE);
if (status)
goto free_data;
for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
struct nvme_ns_id_desc *cur = data + pos;
if (cur->nidl == 0)
break;
switch (cur->nidt) {
case NVME_NIDT_EUI64:
if (cur->nidl != NVME_NIDT_EUI64_LEN) {
dev_warn(ns->ctrl->device,
"ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
cur->nidl);
goto free_data;
}
len = NVME_NIDT_EUI64_LEN;
memcpy(ns->eui, data + pos + sizeof(*cur), len);
break;
case NVME_NIDT_NGUID:
if (cur->nidl != NVME_NIDT_NGUID_LEN) {
dev_warn(ns->ctrl->device,
"ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
cur->nidl);
goto free_data;
}
len = NVME_NIDT_NGUID_LEN;
memcpy(ns->nguid, data + pos + sizeof(*cur), len);
break;
case NVME_NIDT_UUID:
if (cur->nidl != NVME_NIDT_UUID_LEN) {
dev_warn(ns->ctrl->device,
"ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
cur->nidl);
goto free_data;
}
len = NVME_NIDT_UUID_LEN;
uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
break;
default:
/* Skip unknown types */
len = cur->nidl;
break;
}
len += sizeof(*cur);
}
free_data:
kfree(data);
return status;
}
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
struct nvme_command c = { };
@ -745,7 +849,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
* access to the admin queue, as that might be the only way to fix them up.
*/
if (status > 0) {
dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
*count = 0;
} else {
nr_io_queues = min(result & 0xffff, result >> 16) + 1;
@ -976,6 +1080,12 @@ static void nvme_init_integrity(struct nvme_ns *ns)
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_set_chunk_size(struct nvme_ns *ns)
{
/* noiob is in logical blocks; shift converts it to 512-byte sectors */
u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
static void nvme_config_discard(struct nvme_ns *ns)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@ -1009,7 +1119,15 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
/* Don't treat an error as fatal; we potentially
* already have an NGUID or EUI-64.
*/
if (nvme_identify_ns_descs(ns, ns->ns_id))
dev_warn(ns->ctrl->device,
"%s: Identify Descriptors failed\n", __func__);
}
return 0;
}
@ -1027,12 +1145,15 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->lba_shift == 0)
ns->lba_shift = 9;
bs = 1 << ns->lba_shift;
ns->noiob = le16_to_cpu(id->noiob);
blk_mq_freeze_queue(disk->queue);
if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
nvme_prep_integrity(disk, id, bs);
blk_queue_logical_block_size(ns->queue, bs);
if (ns->noiob)
nvme_set_chunk_size(ns);
if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
nvme_init_integrity(ns);
if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@ -1276,7 +1397,7 @@ EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
unsigned long timeout = jiffies + (shutdown_timeout * HZ);
u32 csts;
int ret;
@ -1575,7 +1696,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
}
@ -1603,7 +1724,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
prev_apsta = ctrl->apsta;
if (ctrl->quirks & NVME_QUIRK_NO_APST) {
if (force_apst && id->apsta) {
dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
ctrl->apsta = 1;
} else {
ctrl->apsta = 0;
@ -1627,12 +1748,14 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ret = -EINVAL;
if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
dev_err(ctrl->dev,
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
}
} else {
ctrl->cntlid = le16_to_cpu(id->cntlid);
ctrl->hmpre = le32_to_cpu(id->hmpre);
ctrl->hmmin = le32_to_cpu(id->hmmin);
}
kfree(id);
@ -1728,7 +1851,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
dev_warn(ctrl->device, "resetting controller\n");
return ctrl->ops->reset_ctrl(ctrl);
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
@ -1754,7 +1877,7 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
int ret;
ret = ctrl->ops->reset_ctrl(ctrl);
ret = nvme_reset_ctrl_sync(ctrl);
if (ret < 0)
return ret;
return count;
@ -1780,8 +1903,8 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
int serial_len = sizeof(ctrl->serial);
int model_len = sizeof(ctrl->model);
if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
return sprintf(buf, "eui.%16phN\n", ns->uuid);
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return sprintf(buf, "eui.%16phN\n", ns->nguid);
if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
return sprintf(buf, "eui.%8phN\n", ns->eui);
@ -1796,11 +1919,28 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%pU\n", ns->nguid);
}
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%pU\n", ns->uuid);
/* For backward compatibility, expose the NGUID to userspace if
* we have no UUID set
*/
if (uuid_is_null(&ns->uuid)) {
printk_ratelimited(KERN_WARNING
"No UUID available providing old NGUID\n");
return sprintf(buf, "%pU\n", ns->nguid);
}
return sprintf(buf, "%pU\n", &ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
@ -1823,6 +1963,7 @@ static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
&dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_nguid.attr,
&dev_attr_eui.attr,
&dev_attr_nsid.attr,
NULL,
@ -1835,7 +1976,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
if (a == &dev_attr_uuid.attr) {
if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
if (uuid_is_null(&ns->uuid) ||
!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return 0;
}
if (a == &dev_attr_nguid.attr) {
if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return 0;
}
if (a == &dev_attr_eui.attr) {
@ -2049,7 +2195,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_nvm_ns_supported(ns, id) &&
nvme_nvm_register(ns, disk_name, node)) {
dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);
goto out_free_id;
}
@ -2224,7 +2370,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
* removal.
*/
if (ctrl->state == NVME_CTRL_LIVE)
schedule_work(&ctrl->scan_work);
queue_work(nvme_wq, &ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
@ -2279,7 +2425,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
/*FALLTHRU*/
case NVME_SC_ABORT_REQ:
++ctrl->event_limit;
schedule_work(&ctrl->async_event_work);
queue_work(nvme_wq, &ctrl->async_event_work);
break;
default:
break;
@ -2302,7 +2448,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
ctrl->event_limit = NVME_NR_AERS;
schedule_work(&ctrl->async_event_work);
queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);
@ -2537,10 +2683,15 @@ int __init nvme_core_init(void)
{
int result;
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
return -ENOMEM;
result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
&nvme_dev_fops);
if (result < 0)
return result;
goto destroy_wq;
else if (result > 0)
nvme_char_major = result;
@ -2552,8 +2703,10 @@ int __init nvme_core_init(void)
return 0;
unregister_chrdev:
unregister_chrdev:
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
destroy_wq:
destroy_workqueue(nvme_wq);
return result;
}
@ -2561,6 +2714,7 @@ void nvme_core_exit(void)
{
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
destroy_workqueue(nvme_wq);
}
MODULE_LICENSE("GPL");
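The reset rework above moves reset_work and nr_reconnects into struct nvme_ctrl and replaces the per-transport reset_ctrl operation with the shared nvme_reset_ctrl()/nvme_reset_ctrl_sync() entry points on nvme_wq. A condensed sketch of what a transport is left to provide (the example_* names are hypothetical; the real callbacks are the fc/pci reset work functions in this series):

static void example_reset_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	/* transport-specific teardown and re-connect steps go here */
}

static void example_init(struct nvme_ctrl *ctrl)
{
	INIT_WORK(&ctrl->reset_work, example_reset_work);
}

/* Every trigger (I/O timeout, failed keep-alive, NVME_IOCTL_RESET,
 * the sysfs reset attribute) then goes through nvme_reset_ctrl(),
 * which atomically moves the state machine to NVME_CTRL_RESETTING
 * and queues ctrl->reset_work on the shared nvme_wq.
 */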

drivers/nvme/host/fabrics.c

@ -58,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
kref_init(&host->ref);
memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
uuid_be_gen(&host->id);
uuid_gen(&host->id);
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
return NULL;
kref_init(&host->ref);
uuid_be_gen(&host->id);
uuid_gen(&host->id);
snprintf(host->nqn, NVMF_NQN_SIZE,
"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
@ -337,6 +337,24 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
}
}
break;
case NVME_SC_CONNECT_INVALID_HOST:
dev_err(ctrl->device,
"Connect for subsystem %s is not allowed, hostnqn: %s\n",
data->subsysnqn, data->hostnqn);
break;
case NVME_SC_CONNECT_CTRL_BUSY:
dev_err(ctrl->device,
"Connect command failed: controller is busy or not available\n");
break;
case NVME_SC_CONNECT_FORMAT:
dev_err(ctrl->device,
"Connect incompatible format: %d",
cmd->connect.recfmt);
break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
@ -395,7 +413,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
if (!data)
return -ENOMEM;
memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
uuid_copy(&data->hostid, &ctrl->opts->host->id);
data->cntlid = cpu_to_le16(0xffff);
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@ -454,7 +472,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (!data)
return -ENOMEM;
memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
uuid_copy(&data->hostid, &ctrl->opts->host->id);
data->cntlid = cpu_to_le16(ctrl->cntlid);
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@ -474,7 +492,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
if (ctrl->opts->max_reconnects != -1 &&
ctrl->opts->nr_reconnects < ctrl->opts->max_reconnects)
ctrl->nr_reconnects < ctrl->opts->max_reconnects)
return true;
return false;
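nvmf_host_default() above pairs the new uuid_gen() with the kernel's %pUb printf extension, which renders the 16 bytes in canonical textual form. A small sketch of that idiom (example_host_id is hypothetical):

#include <linux/kernel.h>
#include <linux/uuid.h>

static void example_host_id(char *nqn, size_t len)
{
	uuid_t id;

	uuid_gen(&id);	/* random version-4 UUID, ex uuid_be_gen() */
	snprintf(nqn, len,
		 "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &id);
}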

drivers/nvme/host/fabrics.h

@ -36,7 +36,7 @@ struct nvmf_host {
struct kref ref;
struct list_head list;
char nqn[NVMF_NQN_SIZE];
uuid_be id;
uuid_t id;
};
/**
@ -80,7 +80,6 @@ enum {
* @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
* @kato: Keep-alive timeout.
* @host: Virtual NVMe host, contains the NQN and Host ID.
* @nr_reconnects: number of reconnect attempted since the last ctrl failure
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
@ -98,7 +97,6 @@ struct nvmf_ctrl_options {
bool discovery_nqn;
unsigned int kato;
struct nvmf_host *host;
int nr_reconnects;
int max_reconnects;
};

drivers/nvme/host/fc.c

@ -161,7 +161,6 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct work_struct delete_work;
struct work_struct reset_work;
struct delayed_work connect_work;
struct kref ref;
@ -214,7 +213,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
static struct workqueue_struct *nvme_fc_wq;
@ -878,8 +876,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
/* Linux supports only Dynamic controllers */
assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
@ -1450,18 +1447,8 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
static int
nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_queue *queue = &ctrl->queues[0];
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
@ -1776,10 +1763,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
return;
}
if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Failed to schedule "
"reset work\n", ctrl->cnum);
nvme_reset_ctrl(&ctrl->ctrl);
}
static enum blk_eh_timer_return
@ -2312,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
int ret;
bool changed;
++ctrl->ctrl.opts->nr_reconnects;
++ctrl->ctrl.nr_reconnects;
/*
* Create the admin queue
@ -2409,7 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
ctrl->ctrl.opts->nr_reconnects = 0;
ctrl->ctrl.nr_reconnects = 0;
if (ctrl->queue_count > 1) {
nvme_start_queues(&ctrl->ctrl);
@ -2529,7 +2513,7 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, delete_work);
cancel_work_sync(&ctrl->reset_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
@ -2556,7 +2540,7 @@ __nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
return true;
if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
if (!queue_work(nvme_wq, &ctrl->delete_work))
return true;
return false;
@ -2583,7 +2567,7 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
ret = __nvme_fc_del_ctrl(ctrl);
if (!ret)
flush_workqueue(nvme_fc_wq);
flush_workqueue(nvme_wq);
nvme_put_ctrl(&ctrl->ctrl);
@ -2608,13 +2592,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
queue_delayed_work(nvme_wq, &ctrl->connect_work,
ctrl->ctrl.opts->reconnect_delay * HZ);
} else {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
ctrl->cnum, ctrl->ctrl.nr_reconnects);
WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
}
}
@ -2623,7 +2607,7 @@ static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, reset_work);
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
int ret;
/* will block waiting for io to terminate */
@ -2637,29 +2621,6 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
}
/*
* called by the nvme core layer, for sysfs interface that requests
* a reset of the nvme controller
*/
static int
nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
return -EBUSY;
flush_work(&ctrl->reset_work);
return 0;
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@ -2667,7 +2628,6 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
.reset_ctrl = nvme_fc_reset_nvme_ctrl,
.free_ctrl = nvme_fc_nvme_ctrl_freed,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_del_nvme_ctrl,
@ -2697,7 +2657,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
.complete = nvme_fc_complete_rq,
.init_request = nvme_fc_init_admin_request,
.init_request = nvme_fc_init_request,
.exit_request = nvme_fc_exit_request,
.reinit_request = nvme_fc_reinit_request,
.init_hctx = nvme_fc_init_admin_hctx,
@ -2742,7 +2702,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
kref_init(&ctrl->ref);
INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
spin_lock_init(&ctrl->lock);
@ -2967,20 +2927,7 @@ static struct nvmf_transport_ops nvme_fc_transport = {
static int __init nvme_fc_init_module(void)
{
int ret;
nvme_fc_wq = create_workqueue("nvme_fc_wq");
if (!nvme_fc_wq)
return -ENOMEM;
ret = nvmf_register_transport(&nvme_fc_transport);
if (ret)
goto err;
return 0;
err:
destroy_workqueue(nvme_fc_wq);
return ret;
return nvmf_register_transport(&nvme_fc_transport);
}
static void __exit nvme_fc_exit_module(void)
@ -2991,8 +2938,6 @@ static void __exit nvme_fc_exit_module(void)
nvmf_unregister_transport(&nvme_fc_transport);
destroy_workqueue(nvme_fc_wq);
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
}

drivers/nvme/host/lightnvm.c

@ -242,7 +242,7 @@ static inline void _nvme_nvm_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

drivers/nvme/host/nvme.h

@ -27,12 +27,11 @@ extern unsigned char nvme_io_timeout;
extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT (admin_timeout * HZ)
extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
#define NVME_DEFAULT_KATO 5
#define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq;
enum {
NVME_NS_LBA = 0,
NVME_NS_LIGHTNVM = 1,
@ -131,6 +130,7 @@ struct nvme_ctrl {
struct device *device; /* char device */
struct list_head node;
struct ida ns_ida;
struct work_struct reset_work;
struct opal_dev *opal_dev;
@ -166,12 +166,16 @@ struct nvme_ctrl {
/* Power saving configuration */
u64 ps_max_latency_us;
u32 hmpre;
u32 hmmin;
/* Fabrics only */
u16 sqsize;
u32 ioccsz;
u32 iorcsz;
u16 icdoff;
u16 maxcmd;
int nr_reconnects;
struct nvmf_ctrl_options *opts;
};
@ -189,7 +193,8 @@ struct nvme_ns {
int instance;
u8 eui[8];
u8 uuid[16];
u8 nguid[16];
uuid_t uuid;
unsigned ns_id;
int lba_shift;
@ -197,6 +202,7 @@ struct nvme_ns {
bool ext;
u8 pi_type;
unsigned long flags;
u16 noiob;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD 1
@ -214,7 +220,6 @@ struct nvme_ctrl_ops {
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
int (*delete_ctrl)(struct nvme_ctrl *ctrl);
@ -321,6 +326,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
struct sg_io_hdr;

drivers/nvme/host/pci.c

@ -17,28 +17,15 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
#include <linux/timer.h>
#include <linux/types.h>
@ -66,12 +53,14 @@ static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
static struct workqueue_struct *nvme_workq;
static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
struct nvme_dev;
struct nvme_queue;
static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
@ -92,9 +81,8 @@ struct nvme_dev {
int q_depth;
u32 db_stride;
void __iomem *bar;
struct work_struct reset_work;
unsigned long bar_mapped_size;
struct work_struct remove_work;
struct timer_list watchdog_timer;
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
@ -104,10 +92,18 @@ struct nvme_dev {
u32 cmbloc;
struct nvme_ctrl ctrl;
struct completion ioq_wait;
/* shadow doorbell buffer support: */
u32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
u32 *dbbuf_eis;
dma_addr_t dbbuf_eis_dma_addr;
/* host memory buffer support: */
u64 host_mem_size;
u32 nr_host_mem_descs;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
};
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@ -185,8 +181,8 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
@ -350,19 +346,6 @@ static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_i
nvmeq->tags = NULL;
}
static int nvme_admin_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = dev->queues[0];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
return 0;
}
static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@ -382,7 +365,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
struct nvme_queue *nvmeq = dev->queues[queue_idx];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
@ -696,18 +680,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
blk_status_t ret = BLK_STS_OK;
/*
* If formatted with metadata, require that the block layer provide a buffer
* unless this namespace is formatted such that the metadata can be
* stripped/generated by the controller with PRACT=1.
*/
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
!blk_rq_is_passthrough(req))
return BLK_STS_NOTSUPP;
}
blk_status_t ret;
ret = nvme_setup_cmd(ns, req, &cmnd);
if (ret)
@ -948,6 +921,51 @@ static void abort_endio(struct request *req, blk_status_t error)
blk_mq_free_request(req);
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
/* If true, indicates loss of adapter communication, possibly by an
* NVMe Subsystem reset.
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
/* If there is a reset ongoing, we shouldn't reset again. */
if (dev->ctrl.state == NVME_CTRL_RESETTING)
return false;
/* We shouldn't reset unless the controller is in a fatal error state
* _or_ we lost communication with it.
*/
if (!(csts & NVME_CSTS_CFS) && !nssro)
return false;
/* If a PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
*/
if (pci_channel_offline(to_pci_dev(dev->dev)))
return false;
return true;
}
static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
/* Read a config register to help see what died. */
u16 pci_status;
int result;
result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
&pci_status);
if (result == PCIBIOS_SUCCESSFUL)
dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
csts, pci_status);
else
dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
csts, result);
}
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@ -955,6 +973,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd;
u32 csts = readl(dev->bar + NVME_REG_CSTS);
/*
* Reset immediately if the controller has failed
*/
if (nvme_should_reset(dev, csts)) {
nvme_warn_reset(dev, csts);
nvme_dev_disable(dev, false);
nvme_reset_ctrl(&dev->ctrl);
return BLK_EH_HANDLED;
}
/*
* Did we miss an interrupt?
@ -991,7 +1020,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
nvme_reset(dev);
nvme_reset_ctrl(&dev->ctrl);
/*
* Mark the request as handled, since the inline shutdown
@ -1245,7 +1274,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
.complete = nvme_pci_complete_rq,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
.init_request = nvme_admin_init_request,
.init_request = nvme_init_request,
.timeout = nvme_timeout,
};
@ -1309,6 +1338,32 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
return 0;
}
static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
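Each queue, the admin queue included, owns a pair of 32-bit doorbells (SQ tail and CQ head) spaced by the controller's doorbell stride, which is where the 8 * db_stride term comes from. A worked instance of the formula, assuming CAP.DSTRD = 0 so that db_stride = 1:
/*
 * Doorbells start at NVME_REG_DBS (offset 0x1000).  With db_stride == 1
 * each queue needs 8 bytes: a 4-byte SQ tail doorbell plus a 4-byte CQ
 * head doorbell.  For four I/O queues plus the admin queue:
 *
 *	db_bar_size(dev, 4) = 0x1000 + (4 + 1) * 8 * 1 = 0x1028
 *
 * which the initial NVME_REG_DBS + 4096 mapping from nvme_dev_map()
 * already covers, so nvme_remap_bar() returns without remapping.
 */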
static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (size <= dev->bar_mapped_size)
return 0;
if (size > pci_resource_len(pdev, 0))
return -ENOMEM;
if (dev->bar)
iounmap(dev->bar);
dev->bar = ioremap(pci_resource_start(pdev, 0), size);
if (!dev->bar) {
dev->bar_mapped_size = 0;
return -ENOMEM;
}
dev->bar_mapped_size = size;
dev->dbs = dev->bar + NVME_REG_DBS;
return 0;
}
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@ -1316,6 +1371,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq;
result = nvme_remap_bar(dev, db_bar_size(dev, 0));
if (result < 0)
return result;
dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
NVME_CAP_NSSRC(cap) : 0;
@ -1356,66 +1415,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
/* If true, indicates loss of adapter communication, possibly by an
* NVMe Subsystem reset.
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
/* If there is a reset ongoing, we shouldn't reset again. */
if (dev->ctrl.state == NVME_CTRL_RESETTING)
return false;
/* We shouldn't reset unless the controller is in a fatal error state
* _or_ we lost communication with it.
*/
if (!(csts & NVME_CSTS_CFS) && !nssro)
return false;
/* If a PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
*/
if (pci_channel_offline(to_pci_dev(dev->dev)))
return false;
return true;
}
static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
/* Read a config register to help see what died. */
u16 pci_status;
int result;
result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
&pci_status);
if (result == PCIBIOS_SUCCESSFUL)
dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
csts, pci_status);
else
dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
csts, result);
}
static void nvme_watchdog_timer(unsigned long data)
{
struct nvme_dev *dev = (struct nvme_dev *)data;
u32 csts = readl(dev->bar + NVME_REG_CSTS);
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
if (!nvme_reset(dev))
nvme_warn_reset(dev, csts);
return;
}
mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}
static int nvme_create_io_queues(struct nvme_dev *dev)
{
unsigned i, max;
@ -1512,16 +1511,168 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
}
}
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
struct nvme_command c;
u64 dma_addr;
int ret;
dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, dma_addr))
return -ENOMEM;
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
c.features.dword11 = cpu_to_le32(bits);
c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
ilog2(dev->ctrl.page_size));
c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
if (ret) {
dev_warn(dev->ctrl.device,
"failed to set host mem (err %d, flags %#x).\n",
ret, bits);
}
dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
return ret;
}
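The Set Features command above passes the buffer size in controller pages in dword12, the bus address of the descriptor list split across dword13/14, and the number of descriptors in dword15. Each entry in that list is a fixed 16-byte descriptor; a sketch of its layout, matching the nvme_host_mem_buf_desc definition this code relies on (field names taken from include/linux/nvme.h as introduced with this feature):
struct nvme_host_mem_buf_desc {
	__le64	addr;	/* bus address of one physically contiguous chunk */
	__le32	size;	/* chunk length, in controller pages */
	__u32	rsvd;
};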
static void nvme_free_host_mem(struct nvme_dev *dev)
{
int i;
for (i = 0; i < dev->nr_host_mem_descs; i++) {
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
le64_to_cpu(desc->addr));
}
kfree(dev->host_mem_desc_bufs);
dev->host_mem_desc_bufs = NULL;
kfree(dev->host_mem_descs);
dev->host_mem_descs = NULL;
}
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
u32 chunk_size, max_entries, i = 0;
void **bufs;
u64 size, tmp;
/* start big and work our way down */
chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
retry:
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
max_entries = tmp;
descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
if (!descs)
goto out;
bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
if (!bufs)
goto out_free_descs;
for (size = 0; size < preferred; size += chunk_size) {
u32 len = min_t(u64, chunk_size, preferred - size);
dma_addr_t dma_addr;
bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
if (!bufs[i])
break;
descs[i].addr = cpu_to_le64(dma_addr);
descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
i++;
}
if (!size || (min && size < min)) {
dev_warn(dev->ctrl.device,
"failed to allocate host memory buffer.\n");
goto out_free_bufs;
}
dev_info(dev->ctrl.device,
"allocated %lld MiB host memory buffer.\n",
size >> ilog2(SZ_1M));
dev->nr_host_mem_descs = i;
dev->host_mem_size = size;
dev->host_mem_descs = descs;
dev->host_mem_desc_bufs = bufs;
return 0;
out_free_bufs:
while (--i >= 0) {
size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
dma_free_coherent(dev->dev, size, bufs[i],
le64_to_cpu(descs[i].addr));
}
kfree(bufs);
out_free_descs:
kfree(descs);
out:
/* try a smaller chunk size if we failed early */
if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
chunk_size /= 2;
goto retry;
}
dev->host_mem_descs = NULL;
return -ENOMEM;
}
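The allocation strategy is greedy with back-off: start with the largest chunk the page allocator can provide, and whenever the result falls short of the controller's minimum, halve the chunk size and retry until chunks would drop below two pages. A worked pass with illustrative numbers only (assuming 4 KiB pages and MAX_ORDER = 11):
/*
 * preferred = 32 MiB, min = 4 MiB:
 *
 *   pass 1: chunk_size = PAGE_SIZE << MAX_ORDER = 8 MiB, so
 *           max_entries = 4; if only one chunk is allocated before
 *           dma_alloc_attrs() fails, size = 8 MiB >= min and the
 *           8 MiB buffer is kept;
 *   pass 2: only reached when size < min; chunk_size halves to 4 MiB,
 *           max_entries doubles to 8, and so on down to 2 * PAGE_SIZE.
 */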
static void nvme_setup_host_mem(struct nvme_dev *dev)
{
u64 max = (u64)max_host_mem_size_mb * SZ_1M;
u64 preferred = (u64)dev->ctrl.hmpre * 4096;
u64 min = (u64)dev->ctrl.hmmin * 4096;
u32 enable_bits = NVME_HOST_MEM_ENABLE;
preferred = min(preferred, max);
if (min > max) {
dev_warn(dev->ctrl.device,
"min host memory (%lld MiB) above limit (%d MiB).\n",
min >> ilog2(SZ_1M), max_host_mem_size_mb);
nvme_free_host_mem(dev);
return;
}
/*
* If we already have a buffer allocated, check if we can reuse it.
*/
if (dev->host_mem_descs) {
if (dev->host_mem_size >= min)
enable_bits |= NVME_HOST_MEM_RETURN;
else
nvme_free_host_mem(dev);
}
if (!dev->host_mem_descs) {
if (nvme_alloc_host_mem(dev, min, preferred))
return;
}
if (nvme_set_host_mem(dev, enable_bits))
nvme_free_host_mem(dev);
}
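HMPRE and HMMIN come from Identify Controller and are expressed in 4 KiB units, hence the fixed 4096 multiplier above. For example:
/*
 * hmpre = 8192  =>  preferred = 8192 * 4096 = 32 MiB
 * hmmin = 1024  =>  min       = 1024 * 4096 =  4 MiB
 *
 * preferred is then clamped to max_host_mem_size_mb (128 MiB by
 * default), and nvme_reset_work() skips the whole setup when the
 * controller reports hmpre == 0.
 */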
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, nr_io_queues, size;
int result, nr_io_queues;
unsigned long size;
nr_io_queues = num_online_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
@ -1540,20 +1691,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
nvme_release_cmb(dev);
}
size = db_bar_size(dev, nr_io_queues);
if (size > 8192) {
iounmap(dev->bar);
do {
dev->bar = ioremap(pci_resource_start(pdev, 0), size);
if (dev->bar)
break;
if (!--nr_io_queues)
return -ENOMEM;
size = db_bar_size(dev, nr_io_queues);
} while (1);
dev->dbs = dev->bar + 4096;
adminq->q_db = dev->dbs;
}
do {
size = db_bar_size(dev, nr_io_queues);
result = nvme_remap_bar(dev, size);
if (!result)
break;
if (!--nr_io_queues)
return -ENOMEM;
} while (1);
adminq->q_db = dev->dbs;
/* Deregister the admin queue's interrupt */
pci_free_irq(pdev, 0, adminq);
@ -1797,8 +1943,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
del_timer_sync(&dev->watchdog_timer);
mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(pdev)) {
u32 csts = readl(dev->bar + NVME_REG_CSTS);
@ -1813,8 +1957,20 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* Give the controller a chance to complete all entered requests if
* doing a safe shutdown.
*/
if (!dead && shutdown)
nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
if (!dead) {
if (shutdown)
nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
/*
* If the controller is still alive, tell it to stop using the
* host memory buffer. In theory the shutdown / reset should
* make sure that it doesn't access the host memory anymore,
* but I'd rather be safe than sorry.
*/
if (dev->host_mem_descs)
nvme_set_host_mem(dev, 0);
}
nvme_stop_queues(&dev->ctrl);
queues = dev->online_queues - 1;
@ -1897,7 +2053,8 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
static void nvme_reset_work(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
struct nvme_dev *dev =
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
@ -1946,6 +2103,9 @@ static void nvme_reset_work(struct work_struct *work)
"unable to allocate dma for dbbuf\n");
}
if (dev->ctrl.hmpre)
nvme_setup_host_mem(dev);
result = nvme_setup_io_queues(dev);
if (result)
goto out;
@ -1959,8 +2119,6 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->online_queues > 1)
nvme_queue_async_events(&dev->ctrl);
mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
/*
* Keep the controller around but remove all namespaces if we don't have
* any working I/O queue.
@ -2000,17 +2158,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
nvme_put_ctrl(&dev->ctrl);
}
static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
return 0;
}
static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
*val = readl(to_nvme_dev(ctrl)->bar + off);
@ -2029,16 +2176,6 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
return 0;
}
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
int ret = nvme_reset(dev);
if (!ret)
flush_work(&dev->reset_work);
return ret;
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
@ -2046,7 +2183,6 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
.reset_ctrl = nvme_pci_reset_ctrl,
.free_ctrl = nvme_pci_free_ctrl,
.submit_async_event = nvme_pci_submit_async_event,
};
@ -2058,8 +2194,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (pci_request_mem_regions(pdev, "nvme"))
return -ENODEV;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
goto release;
return 0;
@ -2113,10 +2248,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto free;
INIT_WORK(&dev->reset_work, nvme_reset_work);
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
(unsigned long)dev);
mutex_init(&dev->shutdown_lock);
init_completion(&dev->ioq_wait);
@ -2134,7 +2267,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
queue_work(nvme_workq, &dev->reset_work);
queue_work(nvme_wq, &dev->ctrl.reset_work);
return 0;
release_pools:
@ -2155,7 +2288,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
if (prepare)
nvme_dev_disable(dev, false);
else
nvme_reset(dev);
nvme_reset_ctrl(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
@ -2175,7 +2308,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&dev->reset_work);
cancel_work_sync(&dev->ctrl.reset_work);
pci_set_drvdata(pdev, NULL);
if (!pci_device_is_present(pdev)) {
@ -2183,9 +2316,10 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_disable(dev, false);
}
flush_work(&dev->reset_work);
flush_work(&dev->ctrl.reset_work);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
nvme_release_prp_pools(dev);
@ -2226,7 +2360,7 @@ static int nvme_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
nvme_reset(ndev);
nvme_reset_ctrl(&ndev->ctrl);
return 0;
}
#endif
@ -2265,7 +2399,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
nvme_reset(dev);
nvme_reset_ctrl(&dev->ctrl);
return PCI_ERS_RESULT_RECOVERED;
}
@ -2321,22 +2455,12 @@ static struct pci_driver nvme_driver = {
static int __init nvme_init(void)
{
int result;
nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!nvme_workq)
return -ENOMEM;
result = pci_register_driver(&nvme_driver);
if (result)
destroy_workqueue(nvme_workq);
return result;
return pci_register_driver(&nvme_driver);
}
static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
destroy_workqueue(nvme_workq);
_nvme_check_size();
}

View file

@ -80,10 +80,8 @@ struct nvme_rdma_request {
};
enum nvme_rdma_queue_flags {
NVME_RDMA_Q_CONNECTED = (1 << 0),
NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
NVME_RDMA_Q_DELETING = (1 << 2),
NVME_RDMA_Q_LIVE = (1 << 3),
NVME_RDMA_Q_LIVE = 0,
NVME_RDMA_Q_DELETING = 1,
};
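The flags move from mask-style values to plain bit numbers because set_bit(), test_bit() and clear_bit() interpret their first argument as a bit index, not a mask. A short illustration of the difference:
/*
 * unsigned long flags = 0;
 *
 * set_bit(NVME_RDMA_Q_LIVE, &flags);      // sets bit 0, flags == 0x1
 * set_bit(NVME_RDMA_Q_DELETING, &flags);  // sets bit 1, flags == 0x3
 *
 * With the old definition NVME_RDMA_Q_DELETING == (1 << 2) == 4, the
 * same call would have operated on bit 4 instead.
 */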
struct nvme_rdma_queue {
@ -103,9 +101,6 @@ struct nvme_rdma_queue {
};
struct nvme_rdma_ctrl {
/* read and written in the hot path */
spinlock_t lock;
/* read only in the hot path */
struct nvme_rdma_queue *queues;
u32 queue_count;
@ -113,7 +108,6 @@ struct nvme_rdma_ctrl {
/* other member variables */
struct blk_mq_tag_set tag_set;
struct work_struct delete_work;
struct work_struct reset_work;
struct work_struct err_work;
struct nvme_rdma_qe async_event_sqe;
@ -145,8 +139,6 @@ static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
static struct workqueue_struct *nvme_rdma_wq;
/*
* Disabling this option makes small I/O go faster, but is fundamentally
* unsafe. With it turned off we will have to register a global rkey that
@ -301,10 +293,12 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
return ret;
}
static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
struct request *rq, unsigned int queue_idx)
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
struct nvme_rdma_ctrl *ctrl = set->driver_data;
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
struct nvme_rdma_device *dev = queue->device;
@ -315,22 +309,13 @@ static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
DMA_TO_DEVICE);
}
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
}
static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
return __nvme_rdma_exit_request(set->driver_data, rq, 0);
}
static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
struct request *rq, unsigned int queue_idx)
static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
struct nvme_rdma_ctrl *ctrl = set->driver_data;
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
@ -358,20 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
return -ENOMEM;
}
static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
}
static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
return __nvme_rdma_init_request(set->driver_data, rq, 0);
}
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@ -469,9 +440,6 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
struct nvme_rdma_device *dev;
struct ib_device *ibdev;
if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
return;
dev = queue->device;
ibdev = dev->dev;
rdma_destroy_qp(queue->cm_id);
@ -483,17 +451,21 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
nvme_rdma_dev_put(dev);
}
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
struct nvme_rdma_device *dev)
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
struct ib_device *ibdev = dev->dev;
struct ib_device *ibdev;
const int send_wr_factor = 3; /* MR, SEND, INV */
const int cq_factor = send_wr_factor + 1; /* + RECV */
int comp_vector, idx = nvme_rdma_queue_idx(queue);
int ret;
queue->device = dev;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
if (!queue->device) {
dev_err(queue->cm_id->device->dev.parent,
"no client data found!\n");
return -ECONNREFUSED;
}
ibdev = queue->device->dev;
/*
* The admin queue is barely used once the controller is live, so don't
@ -506,12 +478,12 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
/* +1 for ib_stop_cq */
queue->ib_cq = ib_alloc_cq(dev->dev, queue,
cq_factor * queue->queue_size + 1, comp_vector,
IB_POLL_SOFTIRQ);
queue->ib_cq = ib_alloc_cq(ibdev, queue,
cq_factor * queue->queue_size + 1,
comp_vector, IB_POLL_SOFTIRQ);
if (IS_ERR(queue->ib_cq)) {
ret = PTR_ERR(queue->ib_cq);
goto out;
goto out_put_dev;
}
ret = nvme_rdma_create_qp(queue, send_wr_factor);
@ -524,7 +496,6 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
ret = -ENOMEM;
goto out_destroy_qp;
}
set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
return 0;
@ -532,7 +503,8 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
ib_destroy_qp(queue->qp);
out_destroy_ib_cq:
ib_free_cq(queue->ib_cq);
out:
out_put_dev:
nvme_rdma_dev_put(queue->device);
return ret;
}
@ -583,12 +555,10 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
}
clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
return 0;
out_destroy_cm_id:
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
return ret;
}
@ -718,11 +688,11 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
if (nvmf_should_reconnect(&ctrl->ctrl)) {
dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
ctrl->ctrl.opts->reconnect_delay);
queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
ctrl->ctrl.opts->reconnect_delay * HZ);
} else {
dev_info(ctrl->ctrl.device, "Removing controller...\n");
queue_work(nvme_rdma_wq, &ctrl->delete_work);
queue_work(nvme_wq, &ctrl->delete_work);
}
}
@ -733,7 +703,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
bool changed;
int ret;
++ctrl->ctrl.opts->nr_reconnects;
++ctrl->ctrl.nr_reconnects;
if (ctrl->queue_count > 1) {
nvme_rdma_free_io_queues(ctrl);
@ -777,7 +747,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
ctrl->ctrl.opts->nr_reconnects = 0;
ctrl->ctrl.nr_reconnects = 0;
if (ctrl->queue_count > 1) {
nvme_queue_scan(&ctrl->ctrl);
@ -790,7 +760,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.opts->nr_reconnects);
ctrl->ctrl.nr_reconnects);
nvme_rdma_reconnect_or_remove(ctrl);
}
@ -802,10 +772,8 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_stop_keep_alive(&ctrl->ctrl);
for (i = 0; i < ctrl->queue_count; i++) {
clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
for (i = 0; i < ctrl->queue_count; i++)
clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
}
if (ctrl->queue_count > 1)
nvme_stop_queues(&ctrl->ctrl);
@ -833,7 +801,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
return;
queue_work(nvme_rdma_wq, &ctrl->err_work);
queue_work(nvme_wq, &ctrl->err_work);
}
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
@ -1278,21 +1246,11 @@ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_device *dev;
int ret;
dev = nvme_rdma_find_get_device(queue->cm_id);
if (!dev) {
dev_err(queue->cm_id->device->dev.parent,
"no client data found!\n");
return -ECONNREFUSED;
}
ret = nvme_rdma_create_queue_ib(queue, dev);
if (ret) {
nvme_rdma_dev_put(dev);
goto out;
}
ret = nvme_rdma_create_queue_ib(queue);
if (ret)
return ret;
ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
if (ret) {
@ -1306,7 +1264,6 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
out_destroy_queue:
nvme_rdma_destroy_queue_ib(queue);
out:
return ret;
}
@ -1383,12 +1340,14 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
complete(&queue->cm_done);
return 0;
case RDMA_CM_EVENT_REJECTED:
nvme_rdma_destroy_queue_ib(queue);
cm_error = nvme_rdma_conn_rejected(queue, ev);
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
nvme_rdma_destroy_queue_ib(queue);
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
cm_error = -ECONNRESET;
@ -1524,7 +1483,6 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
struct ib_wc wc;
int found = 0;
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(cq, 1, &wc) > 0) {
struct ib_cqe *cqe = wc.wr_cqe;
@ -1561,8 +1519,8 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
.init_request = nvme_rdma_init_admin_request,
.exit_request = nvme_rdma_exit_admin_request,
.init_request = nvme_rdma_init_request,
.exit_request = nvme_rdma_exit_request,
.reinit_request = nvme_rdma_reinit_request,
.init_hctx = nvme_rdma_init_admin_hctx,
.timeout = nvme_rdma_timeout,
@ -1673,7 +1631,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
nvme_rdma_free_io_queues(ctrl);
}
if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags))
nvme_shutdown_ctrl(&ctrl->ctrl);
blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@ -1710,7 +1668,7 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
return -EBUSY;
if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
if (!queue_work(nvme_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
@ -1744,8 +1702,8 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, reset_work);
struct nvme_rdma_ctrl *ctrl =
container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
int ret;
bool changed;
@ -1786,22 +1744,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
del_dead_ctrl:
/* Deleting this dead controller... */
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
}
static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
return -EBUSY;
flush_work(&ctrl->reset_work);
return 0;
WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work));
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@ -1811,7 +1754,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
.reset_ctrl = nvme_rdma_reset_ctrl,
.free_ctrl = nvme_rdma_free_ctrl,
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_del_ctrl,
@ -1920,8 +1862,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
nvme_rdma_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
spin_lock_init(&ctrl->lock);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
ctrl->ctrl.sqsize = opts->queue_size - 1;
@ -1940,12 +1881,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
/* sanity check icdoff */
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
ret = -EINVAL;
goto out_remove_admin_queue;
}
/* sanity check keyed sgls */
if (!(ctrl->ctrl.sgls & (1 << 20))) {
dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
ret = -EINVAL;
goto out_remove_admin_queue;
}
@ -2034,7 +1977,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
flush_workqueue(nvme_rdma_wq);
flush_workqueue(nvme_wq);
}
static struct ib_client nvme_rdma_ib_client = {
@ -2047,13 +1990,9 @@ static int __init nvme_rdma_init_module(void)
{
int ret;
nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
if (!nvme_rdma_wq)
return -ENOMEM;
ret = ib_register_client(&nvme_rdma_ib_client);
if (ret)
goto err_destroy_wq;
return ret;
ret = nvmf_register_transport(&nvme_rdma_transport);
if (ret)
@ -2063,8 +2002,6 @@ static int __init nvme_rdma_init_module(void)
err_unreg_client:
ib_unregister_client(&nvme_rdma_ib_client);
err_destroy_wq:
destroy_workqueue(nvme_rdma_wq);
return ret;
}
@ -2072,7 +2009,6 @@ static void __exit nvme_rdma_cleanup_module(void)
{
nvmf_unregister_transport(&nvme_rdma_transport);
ib_unregister_client(&nvme_rdma_ib_client);
destroy_workqueue(nvme_rdma_wq);
}
module_init(nvme_rdma_init_module);

View file

@ -336,7 +336,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
static const int buf_size = 4096;
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
@ -367,6 +367,64 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
void *id, off_t *off)
{
struct nvme_ns_id_desc desc = {
.nidt = type,
.nidl = len,
};
u16 status;
status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
if (status)
return status;
*off += sizeof(desc);
status = nvmet_copy_to_sgl(req, *off, id, len);
if (status)
return status;
*off += len;
return 0;
}
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
struct nvmet_ns *ns;
u16 status = 0;
off_t off = 0;
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
if (!ns) {
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto out;
}
if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
NVME_NIDT_UUID_LEN,
&ns->uuid, &off);
if (status)
goto out_put_ns;
}
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
NVME_NIDT_NGUID_LEN,
&ns->nguid, &off);
if (status)
goto out_put_ns;
}
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
nvmet_put_namespace(ns);
out:
nvmet_req_complete(req, status);
}
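The CNS 03h response built here is a packed list of Namespace Identification Descriptors: a 4-byte header (NIDT, NIDL, two reserved bytes) followed by NIDL bytes of identifier, with the rest of the 4096-byte buffer zero-filled. For a namespace carrying both identifiers the buffer ends up laid out as sketched below (offsets per the NVMe 1.3 descriptor format):
/*
 * 0x00: nidt = NVME_NIDT_UUID (3),  nidl = 16, two reserved bytes
 * 0x04: 16-byte UUID
 * 0x14: nidt = NVME_NIDT_NGUID (2), nidl = 16, two reserved bytes
 * 0x18: 16-byte NGUID
 * 0x28 .. 0xfff: zeroes, courtesy of the sg_zero_buffer() call above
 */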
/*
* A "mimimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
@ -504,7 +562,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
}
break;
case nvme_admin_identify:
req->data_len = 4096;
req->data_len = NVME_IDENTIFY_DATA_SIZE;
switch (cmd->identify.cns) {
case NVME_ID_CNS_NS:
req->execute = nvmet_execute_identify_ns;
@ -515,6 +573,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case NVME_ID_CNS_NS_ACTIVE_LIST:
req->execute = nvmet_execute_identify_nslist;
return 0;
case NVME_ID_CNS_NS_DESC_LIST:
req->execute = nvmet_execute_identify_desclist;
return 0;
}
break;
case nvme_admin_abort_cmd:

View file

@ -305,11 +305,41 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ns_, device_path);
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}
static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
struct nvmet_subsys *subsys = ns->subsys;
int ret = 0;
mutex_lock(&subsys->lock);
if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
if (uuid_parse(page, &ns->uuid))
ret = -EINVAL;
out_unlock:
mutex_unlock(&subsys->lock);
return ret ? ret : count;
}
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}
CONFIGFS_ATTR(nvmet_ns_, device_uuid);
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
const char *page, size_t count)
{
@ -379,6 +409,7 @@ CONFIGFS_ATTR(nvmet_ns_, enable);
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
&nvmet_ns_attr_device_uuid,
&nvmet_ns_attr_enable,
NULL,
};
@ -619,8 +650,45 @@ static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
static ssize_t nvmet_subsys_version_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
if (NVME_TERTIARY(subsys->ver))
return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
(int)NVME_MAJOR(subsys->ver),
(int)NVME_MINOR(subsys->ver),
(int)NVME_TERTIARY(subsys->ver));
else
return snprintf(page, PAGE_SIZE, "%d.%d\n",
(int)NVME_MAJOR(subsys->ver),
(int)NVME_MINOR(subsys->ver));
}
static ssize_t nvmet_subsys_version_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
int major, minor, tertiary = 0;
int ret;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
if (ret != 2 && ret != 3)
return -EINVAL;
down_write(&nvmet_config_sem);
subsys->ver = NVME_VS(major, minor, tertiary);
up_write(&nvmet_config_sem);
return count;
}
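NVME_VS() packs the parsed fields into the layout of the NVMe Version register, major in bits 31:16, minor in 15:8 and tertiary in 7:0, so for instance:
/*
 * writing "1.3.0" parses as major = 1, minor = 3, tertiary = 0:
 *
 *	NVME_VS(1, 3, 0) = (1 << 16) | (3 << 8) | 0 = 0x10300
 *
 * a two-part "1.2" is also accepted (sscanf() returns 2) and is
 * treated as 1.2.0.
 */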
CONFIGFS_ATTR(nvmet_subsys_, version);
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_version,
NULL,
};

View file

@ -380,6 +380,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
uuid_gen(&ns->uuid);
return ns;
}
@ -926,7 +927,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
if (!subsys)
return NULL;
subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
switch (type) {
case NVME_NQN_NVME:

View file

@ -185,7 +185,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
case nvme_admin_identify:
req->data_len = 4096;
req->data_len = NVME_IDENTIFY_DATA_SIZE;
switch (cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
req->execute =

View file

@ -569,7 +569,6 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
int active;
/*
* mark aborted only in case there were 2 threads in transport
@ -577,7 +576,6 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
* after the abort request
*/
spin_lock(&tfcp_req->reqlock);
active = tfcp_req->active;
tfcp_req->aborted = true;
spin_unlock(&tfcp_req->reqlock);

View file

@ -45,7 +45,6 @@ struct nvme_loop_iod {
};
struct nvme_loop_ctrl {
spinlock_t lock;
struct nvme_loop_queue *queues;
u32 queue_count;
@ -59,7 +58,6 @@ struct nvme_loop_ctrl {
struct nvmet_ctrl *target_ctrl;
struct work_struct delete_work;
struct work_struct reset_work;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@ -151,7 +149,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
/* queue error recovery */
schedule_work(&iod->queue->ctrl->reset_work);
nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
/* fail with DNR on admin cmd timeout */
nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
@ -233,15 +231,10 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
hctx_idx + 1);
}
struct nvme_loop_ctrl *ctrl = set->driver_data;
static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@ -279,7 +272,7 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_admin_request,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_admin_hctx,
.timeout = nvme_loop_timeout,
};
@ -466,7 +459,7 @@ static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
return -EBUSY;
if (!schedule_work(&ctrl->delete_work))
if (!queue_work(nvme_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
@ -500,8 +493,8 @@ static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
struct nvme_loop_ctrl *ctrl = container_of(work,
struct nvme_loop_ctrl, reset_work);
struct nvme_loop_ctrl *ctrl =
container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
bool changed;
int ret;
@ -539,21 +532,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_put_ctrl(&ctrl->ctrl);
}
static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
if (!schedule_work(&ctrl->reset_work))
return -EBUSY;
flush_work(&ctrl->reset_work);
return 0;
}
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.name = "loop",
.module = THIS_MODULE,
@ -561,7 +539,6 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
.reset_ctrl = nvme_loop_reset_ctrl,
.free_ctrl = nvme_loop_free_ctrl,
.submit_async_event = nvme_loop_submit_async_event,
.delete_ctrl = nvme_loop_del_ctrl,
@ -628,15 +605,13 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
INIT_LIST_HEAD(&ctrl->list);
INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
if (ret)
goto out_put_ctrl;
spin_lock_init(&ctrl->lock);
ret = -ENOMEM;
ctrl->ctrl.sqsize = opts->queue_size - 1;
@ -765,7 +740,7 @@ static void __exit nvme_loop_cleanup_module(void)
__nvme_loop_del_ctrl(ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
flush_scheduled_work();
flush_workqueue(nvme_wq);
}
module_init(nvme_loop_init_module);

View file

@ -21,6 +21,7 @@
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
@ -46,6 +47,7 @@ struct nvmet_ns {
u32 blksize_shift;
loff_t size;
u8 nguid[16];
uuid_t uuid;
bool enabled;
struct nvmet_subsys *subsys;

View file

@ -21,13 +21,12 @@
#include "pci.h"
/*
* The UUID is defined in the PCI Firmware Specification available here:
* The GUID is defined in the PCI Firmware Specification available here:
* https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
*/
const u8 pci_acpi_dsm_uuid[] = {
0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
};
const guid_t pci_acpi_dsm_guid =
GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
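guid_t keeps its first three fields little-endian, so the GUID_INIT() literal above spells out exactly the same 16 bytes as the raw array it replaces:
/*
 * old bytes d0 37 c9 e5  ->  le32 0xe5c937d0
 * old bytes 53 35        ->  le16 0x3553
 * old bytes 7a 4d        ->  le16 0x4d7a
 * the trailing 91 17 ea 4d 19 c3 43 4d are stored byte-for-byte
 */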
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
@ -680,7 +679,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
if (!pci_is_root_bus(bus))
return;
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
RESET_DELAY_DSM, NULL);
if (!obj)
return;
@ -745,7 +744,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
if (bridge->ignore_reset_delay)
pdev->d3cold_delay = 0;
obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
FUNCTION_DELAY_DSM, NULL);
if (!obj)
return;

View file

@ -172,7 +172,7 @@ static int dsm_get_label(struct device *dev, char *buf,
if (!handle)
return -1;
obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 0x2,
obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2,
DEVICE_LABEL_DSM, NULL);
if (!obj)
return -1;
@ -212,7 +212,7 @@ static bool device_has_dsm(struct device *dev)
if (!handle)
return false;
return !!acpi_check_dsm(handle, pci_acpi_dsm_uuid, 0x2,
return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
1 << DEVICE_LABEL_DSM);
}

View file

@ -245,7 +245,7 @@ struct sdebug_dev_info {
unsigned int channel;
unsigned int target;
u64 lun;
uuid_be lu_name;
uuid_t lu_name;
struct sdebug_host_info *sdbg_host;
unsigned long uas_bm[1];
atomic_t num_in_q;
@ -965,7 +965,7 @@ static const u64 naa3_comp_c = 0x3111111000000000ULL;
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
int target_dev_id, int dev_id_num,
const char *dev_id_str, int dev_id_str_len,
const uuid_be *lu_name)
const uuid_t *lu_name)
{
int num, port_a;
char b[32];
@ -3568,7 +3568,7 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
}
static bool got_shared_uuid;
static uuid_be shared_uuid;
static uuid_t shared_uuid;
static struct sdebug_dev_info *sdebug_device_create(
struct sdebug_host_info *sdbg_host, gfp_t flags)
@ -3578,12 +3578,12 @@ static struct sdebug_dev_info *sdebug_device_create(
devip = kzalloc(sizeof(*devip), flags);
if (devip) {
if (sdebug_uuid_ctl == 1)
uuid_be_gen(&devip->lu_name);
uuid_gen(&devip->lu_name);
else if (sdebug_uuid_ctl == 2) {
if (got_shared_uuid)
devip->lu_name = shared_uuid;
else {
uuid_be_gen(&shared_uuid);
uuid_gen(&shared_uuid);
got_shared_uuid = true;
devip->lu_name = shared_uuid;
}

View file

@ -23,7 +23,7 @@ enum int3400_thermal_uuid {
INT3400_THERMAL_MAXIMUM_UUID,
};
static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"3A95C389-E4B8-4629-A526-C52C88626BAE",
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
@ -141,10 +141,10 @@ static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
}
for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
u8 uuid[16];
guid_t guid;
acpi_str_to_uuid(int3400_thermal_uuids[j], uuid);
if (!strncmp(uuid, objb->buffer.pointer, 16)) {
guid_parse(int3400_thermal_uuids[j], &guid);
if (guid_equal((guid_t *)objb->buffer.pointer, &guid)) {
priv->uuid_bitmap |= (1 << j);
break;
}

View file

@ -42,7 +42,7 @@
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
#define PCI_INTEL_BXT_STATE_D0 0
#define PCI_INTEL_BXT_STATE_D3 3
@ -51,14 +51,14 @@
* struct dwc3_pci - Driver private structure
* @dwc3: child dwc3 platform_device
* @pci: our link to PCI bus
* @uuid: _DSM UUID
* @guid: _DSM GUID
* @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
*/
struct dwc3_pci {
struct platform_device *dwc3;
struct pci_dev *pci;
u8 uuid[16];
guid_t guid;
unsigned int has_dsm_for_pm:1;
};
@ -120,7 +120,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
dwc->has_dsm_for_pm = true;
}
@ -292,7 +292,7 @@ static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
tmp.type = ACPI_TYPE_INTEGER;
tmp.integer.value = param;
obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), &dwc->guid,
1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
if (!obj) {
dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");

View file

@ -213,13 +213,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
#ifdef CONFIG_ACPI
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
{
static const u8 intel_dsm_uuid[] = {
0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
};
static const guid_t intel_dsm_guid =
GUID_INIT(0xac340cb7, 0xe901, 0x45bf,
0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23);
union acpi_object *obj;
obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), &intel_dsm_guid, 3, 1,
NULL);
ACPI_FREE(obj);
}

View file

@ -55,13 +55,13 @@ struct ucsi {
static int ucsi_acpi_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl)
{
uuid_le uuid = UUID_LE(0x6f8398c2, 0x7ca4, 0x11e4,
0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
guid_t guid = GUID_INIT(0x6f8398c2, 0x7ca4, 0x11e4,
0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
union acpi_object *obj;
ucsi->data->ctrl.raw_cmd = ctrl->raw_cmd;
obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), uuid.b, 1, 1, NULL);
obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), &guid, 1, 1, NULL);
if (!obj) {
dev_err(ucsi->dev, "%s: failed to evaluate _DSM\n", __func__);
return -EIO;

View file

@ -105,8 +105,8 @@ enum wcove_typec_role {
WCOVE_ROLE_DEVICE,
};
static uuid_le uuid = UUID_LE(0x482383f0, 0x2876, 0x4e49,
0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49,
0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
static int wcove_typec_func(struct wcove_typec *wcove,
enum wcove_typec_func func, int param)
@ -118,7 +118,7 @@ static int wcove_typec_func(struct wcove_typec *wcove,
tmp.type = ACPI_TYPE_INTEGER;
tmp.integer.value = param;
obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), uuid.b, 1, func,
obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &guid, 1, func,
&argv4);
if (!obj) {
dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
@ -314,7 +314,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
if (ret)
return ret;
if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), uuid.b, 0, 0x1f)) {
if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &guid, 0, 0x1f)) {
dev_err(&pdev->dev, "Missing _DSM functions\n");
return -ENODEV;
}

View file

@ -233,12 +233,12 @@ static int tmem_cleancache_init_fs(size_t pagesize)
return xen_tmem_new_pool(uuid_private, 0, pagesize);
}
static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
{
struct tmem_pool_uuid shared_uuid;
shared_uuid.uuid_lo = *(u64 *)uuid;
shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

View file

@ -350,7 +350,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
{
struct sockaddr_rxrpc srx;
struct afs_server *server;
struct uuid_v1 *r;
struct afs_uuid *r;
unsigned loop;
__be32 *b;
int ret;
@ -380,7 +380,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
}
_debug("unmarshall UUID");
call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
if (!call->request)
return -ENOMEM;
@ -453,7 +453,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
static void SRXAFSCB_ProbeUuid(struct work_struct *work)
{
struct afs_call *call = container_of(work, struct afs_call, work);
struct uuid_v1 *r = call->request;
struct afs_uuid *r = call->request;
struct {
__be32 match;
@ -476,7 +476,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
*/
static int afs_deliver_cb_probe_uuid(struct afs_call *call)
{
struct uuid_v1 *r;
struct afs_uuid *r;
unsigned loop;
__be32 *b;
int ret;
@ -502,15 +502,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
}
_debug("unmarshall UUID");
call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
if (!call->request)
return -ENOMEM;
b = call->buffer;
r = call->request;
r->time_low = b[0];
r->time_mid = htons(ntohl(b[1]));
r->time_hi_and_version = htons(ntohl(b[2]));
r->time_low = ntohl(b[0]);
r->time_mid = ntohl(b[1]);
r->time_hi_and_version = ntohl(b[2]);
r->clock_seq_hi_and_reserved = ntohl(b[3]);
r->clock_seq_low = ntohl(b[4]);

View file

@ -410,6 +410,15 @@ struct afs_interface {
unsigned mtu; /* MTU of interface */
};
struct afs_uuid {
__be32 time_low; /* low part of timestamp */
__be16 time_mid; /* mid part of timestamp */
__be16 time_hi_and_version; /* high part of timestamp and version */
__u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
__u8 clock_seq_low; /* clock seq low */
__u8 node[6]; /* spatially unique node ID (MAC addr) */
};
/*****************************************************************************/
/*
* cache.c
@ -544,7 +553,7 @@ extern int afs_drop_inode(struct inode *);
* main.c
*/
extern struct workqueue_struct *afs_wq;
extern struct uuid_v1 afs_uuid;
extern struct afs_uuid afs_uuid;
/*
* misc.c

View file

@ -31,7 +31,7 @@ static char *rootcell;
module_param(rootcell, charp, 0);
MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
struct uuid_v1 afs_uuid;
struct afs_uuid afs_uuid;
struct workqueue_struct *afs_wq;
/*

View file

@ -3950,7 +3950,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_qcop = &ext4_qctl_operations;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);

View file

@ -1937,7 +1937,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;

View file

@ -203,7 +203,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
memcpy(s->s_uuid, str->sb_uuid, 16);
memcpy(&s->s_uuid, str->sb_uuid, 16);
}
/**

View file

@ -71,25 +71,14 @@ static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}
static int gfs2_uuid_valid(const u8 *uuid)
{
int i;
for (i = 0; i < 16; i++) {
if (uuid[i])
return 1;
}
return 0;
}
static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
struct super_block *s = sdp->sd_vfs;
const u8 *uuid = s->s_uuid;
buf[0] = '\0';
if (!gfs2_uuid_valid(uuid))
if (uuid_is_null(&s->s_uuid))
return 0;
return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}
static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@ -712,14 +701,13 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
{
struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
struct super_block *s = sdp->sd_vfs;
const u8 *uuid = s->s_uuid;
add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
if (gfs2_uuid_valid(uuid))
add_uevent_var(env, "UUID=%pUB", uuid);
if (!uuid_is_null(&s->s_uuid))
add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
return 0;
}

View file

@ -486,7 +486,7 @@ secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
#endif
static inline int
uuid_parse(char **mesg, char *buf, unsigned char **puuid)
nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
{
int len;
@ -586,7 +586,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
if (strcmp(buf, "fsloc") == 0)
err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
else if (strcmp(buf, "uuid") == 0)
err = uuid_parse(&mesg, buf, &exp.ex_uuid);
err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
else if (strcmp(buf, "secinfo") == 0)
err = secinfo_parse(&mesg, buf, &exp);
else

View file

@ -2062,7 +2062,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
memcpy(sb->s_uuid, di->id2.i_super.s_uuid,
memcpy(&sb->s_uuid, di->id2.i_super.s_uuid,
sizeof(di->id2.i_super.s_uuid));
osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;

View file

@ -233,7 +233,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
return err;
}
static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_be *uuid)
static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_t *uuid)
{
struct ovl_fh *fh;
int fh_type, fh_len, dwords;
@ -284,7 +284,6 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
struct dentry *upper)
{
struct super_block *sb = lower->d_sb;
uuid_be *uuid = (uuid_be *) &sb->s_uuid;
const struct ovl_fh *fh = NULL;
int err;
@ -294,8 +293,8 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
* up and a pure upper inode.
*/
if (sb->s_export_op && sb->s_export_op->fh_to_dentry &&
uuid_be_cmp(*uuid, NULL_UUID_BE)) {
fh = ovl_encode_fh(lower, uuid);
!uuid_is_null(&sb->s_uuid)) {
fh = ovl_encode_fh(lower, &sb->s_uuid);
if (IS_ERR(fh))
return PTR_ERR(fh);
}

View file

@ -135,7 +135,7 @@ static struct dentry *ovl_get_origin(struct dentry *dentry,
* Make sure that the stored uuid matches the uuid of the lower
* layer where file handle will be decoded.
*/
if (uuid_be_cmp(fh->uuid, *(uuid_be *) &mnt->mnt_sb->s_uuid))
if (!uuid_equal(&fh->uuid, &mnt->mnt_sb->s_uuid))
goto out;
origin = exportfs_decode_fh(mnt, (struct fid *)fh->fid,

View file

@ -56,7 +56,7 @@ struct ovl_fh {
u8 len; /* size of this header + size of fid */
u8 flags; /* OVL_FH_FLAG_* */
u8 type; /* fid_type of fid */
uuid_be uuid; /* uuid of filesystem */
uuid_t uuid; /* uuid of filesystem */
u8 fid[0]; /* file identifier */
} __packed;

View file

@ -98,8 +98,7 @@ xfs-y += xfs_aops.o \
xfs_sysfs.o \
xfs_trans.o \
xfs_xattr.o \
kmem.o \
uuid.o
kmem.o
# low-level transaction/log code
xfs-y += xfs_log.o \

View file

@ -1,63 +0,0 @@
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <xfs.h>
/* IRIX interpretation of an uuid_t */
typedef struct {
__be32 uu_timelow;
__be16 uu_timemid;
__be16 uu_timehi;
__be16 uu_clockseq;
__be16 uu_node[3];
} xfs_uu_t;
/*
* uuid_getnodeuniq - obtain the node unique fields of a UUID.
*
* This is not in any way a standard or condoned UUID function;
* it is just something that's needed for user-level file handles.
*/
void
uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
{
xfs_uu_t *uup = (xfs_uu_t *)uuid;
fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
be16_to_cpu(uup->uu_timemid);
fsid[1] = be32_to_cpu(uup->uu_timelow);
}
int
uuid_is_nil(uuid_t *uuid)
{
int i;
char *cp = (char *)uuid;
if (uuid == NULL)
return 0;
/* implied check of version number here... */
for (i = 0; i < sizeof *uuid; i++)
if (*cp++) return 0; /* not nil */
return 1; /* is nil */
}
int
uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
{
return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
}


@ -1,35 +0,0 @@
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_SUPPORT_UUID_H__
#define __XFS_SUPPORT_UUID_H__
typedef struct {
unsigned char __u_bits[16];
} uuid_t;
extern int uuid_is_nil(uuid_t *uuid);
extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
static inline void
uuid_copy(uuid_t *dst, uuid_t *src)
{
memcpy(dst, src, sizeof(uuid_t));
}
#endif /* __XFS_SUPPORT_UUID_H__ */


@ -834,9 +834,7 @@ xfs_inode_item_format_convert(
in_f->ilf_dsize = in_f32->ilf_dsize;
in_f->ilf_ino = in_f32->ilf_ino;
/* copy biggest field of ilf_u */
memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
in_f32->ilf_u.ilfu_uuid.__u_bits,
sizeof(uuid_t));
uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
in_f->ilf_blkno = in_f32->ilf_blkno;
in_f->ilf_len = in_f32->ilf_len;
in_f->ilf_boffset = in_f32->ilf_boffset;
@ -851,9 +849,7 @@ xfs_inode_item_format_convert(
in_f->ilf_dsize = in_f64->ilf_dsize;
in_f->ilf_ino = in_f64->ilf_ino;
/* copy biggest field of ilf_u */
memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
in_f64->ilf_u.ilfu_uuid.__u_bits,
sizeof(uuid_t));
uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
in_f->ilf_blkno = in_f64->ilf_blkno;
in_f->ilf_len = in_f64->ilf_len;
in_f->ilf_boffset = in_f64->ilf_boffset;


@ -19,6 +19,7 @@
#define __XFS_LINUX__
#include <linux/types.h>
#include <linux/uuid.h>
/*
* Kernel specific type declarations for XFS
@ -42,7 +43,6 @@ typedef __u32 xfs_nlink_t;
#include "kmem.h"
#include "mrlock.h"
#include "uuid.h"
#include <linux/semaphore.h>
#include <linux/mm.h>


@ -352,13 +352,13 @@ xlog_header_check_mount(
{
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
if (uuid_is_nil(&head->h_fs_uuid)) {
if (uuid_is_null(&head->h_fs_uuid)) {
/*
* IRIX doesn't write the h_fs_uuid or h_fmt fields. If
* h_fs_uuid is nil, we assume this log was last mounted
* h_fs_uuid is null, we assume this log was last mounted
* by IRIX and continue.
*/
xfs_warn(mp, "nil uuid in log - IRIX style log");
xfs_warn(mp, "null uuid in log - IRIX style log");
} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
xfs_warn(mp, "log has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);


@ -74,20 +74,19 @@ xfs_uuid_mount(
int hole, i;
/* Publish UUID in struct super_block */
BUILD_BUG_ON(sizeof(mp->m_super->s_uuid) != sizeof(uuid_t));
memcpy(&mp->m_super->s_uuid, uuid, sizeof(uuid_t));
uuid_copy(&mp->m_super->s_uuid, uuid);
if (mp->m_flags & XFS_MOUNT_NOUUID)
return 0;
if (uuid_is_nil(uuid)) {
xfs_warn(mp, "Filesystem has nil UUID - can't mount");
if (uuid_is_null(uuid)) {
xfs_warn(mp, "Filesystem has null UUID - can't mount");
return -EINVAL;
}
mutex_lock(&xfs_uuid_table_mutex);
for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
if (uuid_is_nil(&xfs_uuid_table[i])) {
if (uuid_is_null(&xfs_uuid_table[i])) {
hole = i;
continue;
}
@ -124,7 +123,7 @@ xfs_uuid_unmount(
mutex_lock(&xfs_uuid_table_mutex);
for (i = 0; i < xfs_uuid_table_size; i++) {
if (uuid_is_nil(&xfs_uuid_table[i]))
if (uuid_is_null(&xfs_uuid_table[i]))
continue;
if (!uuid_equal(uuid, &xfs_uuid_table[i]))
continue;
@ -793,7 +792,10 @@ xfs_mountfs(
* Copies the low order bits of the timestamp and the randomly
* set "sequence" number out of a UUID.
*/
uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
mp->m_fixedfsid[0] =
(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
get_unaligned_be16(&sbp->sb_uuid.b[4]);
mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
mp->m_dmevmask = 0; /* not persistent; set after each mount */
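
The open-coded block above reads the same fields the old
uuid_getnodeuniq() did, straight out of the big-endian byte array:
bytes 8-9 hold the clock sequence, bytes 4-5 the middle timestamp
word, and bytes 0-3 the low timestamp word. Restated as a standalone
sketch (assuming <asm/unaligned.h> for the accessors):

#include <asm/unaligned.h>
#include <linux/uuid.h>

/* Sketch only: fsid[0] packs clock_seq into the high half and
 * time_mid into the low half; fsid[1] is time_low. */
static void example_fixedfsid(const uuid_t *uuid, int fsid[2])
{
	fsid[0] = (get_unaligned_be16(&uuid->b[8]) << 16) |
		   get_unaligned_be16(&uuid->b[4]);
	fsid[1] = get_unaligned_be32(&uuid->b[0]);
}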


@ -61,17 +61,18 @@ bool acpi_ata_match(acpi_handle handle);
bool acpi_bay_match(acpi_handle handle);
bool acpi_dock_match(acpi_handle handle);
bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
u64 rev, u64 func, union acpi_object *argv4);
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
union acpi_object *argv4, acpi_object_type type)
acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
u64 func, union acpi_object *argv4,
acpi_object_type type)
{
union acpi_object *obj;
obj = acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4);
if (obj && obj->type != type) {
ACPI_FREE(obj);
obj = NULL;


@ -26,6 +26,7 @@
#include <linux/resource_ext.h>
#include <linux/device.h>
#include <linux/property.h>
#include <linux/uuid.h>
#ifndef _LINUX
#define _LINUX
@ -457,7 +458,6 @@ struct acpi_osc_context {
struct acpi_buffer ret; /* free by caller if success */
};
acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
@ -741,7 +741,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
}
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const u8 *uuid,
const guid_t *guid,
int rev, int func,
union acpi_object *argv4)
{


@ -27,7 +27,7 @@ struct cleancache_filekey {
struct cleancache_ops {
int (*init_fs)(size_t);
int (*init_shared_fs)(char *uuid, size_t);
int (*init_shared_fs)(uuid_t *uuid, size_t);
int (*get_page)(int, struct cleancache_filekey,
pgoff_t, struct page *);
void (*put_page)(int, struct cleancache_filekey,


@ -30,6 +30,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@ -1328,8 +1329,8 @@ struct super_block {
struct sb_writers s_writers;
char s_id[32]; /* Informational name */
u8 s_uuid[16]; /* UUID */
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
void *s_fs_info; /* Filesystem private info */
unsigned int s_max_links;


@ -219,12 +219,6 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
return NULL;
}
static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
{
uuid_be_to_bin(uuid_str, (uuid_be *)to);
return 0;
}
static inline int disk_max_parts(struct gendisk *disk)
{
if (disk->flags & GENHD_FL_EXT_DEVT)
@ -736,11 +730,6 @@ static inline dev_t blk_lookup_devt(const char *name, int partno)
dev_t devt = MKDEV(0, 0);
return devt;
}
static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
{
return -EINVAL;
}
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_GENHD_H */


@ -177,7 +177,6 @@ struct fcnvme_lsdesc_rjt {
};
#define FCNVME_ASSOC_HOSTID_LEN 16
#define FCNVME_ASSOC_HOSTNQN_LEN 256
#define FCNVME_ASSOC_SUBNQN_LEN 256
@ -191,7 +190,7 @@ struct fcnvme_lsdesc_cr_assoc_cmd {
__be16 cntlid;
__be16 sqsize;
__be32 rsvd52;
u8 hostid[FCNVME_ASSOC_HOSTID_LEN];
uuid_t hostid;
u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN];
u8 rsvd632[384];


@ -16,6 +16,7 @@
#define _LINUX_NVME_H
#include <linux/types.h>
#include <linux/uuid.h>
/* NQN names in command fields are specified with one size */
#define NVMF_NQN_FIELD_LEN 256
@ -101,6 +102,7 @@ enum {
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
@ -207,9 +209,15 @@ struct nvme_id_ctrl {
__u8 tnvmcap[16];
__u8 unvmcap[16];
__le32 rpmbs;
__u8 rsvd316[4];
__le16 edstt;
__u8 dsto;
__u8 fwug;
__le16 kas;
__u8 rsvd322[190];
__le16 hctma;
__le16 mntmt;
__le16 mxtmt;
__le32 sanicap;
__u8 rsvd332[180];
__u8 sqes;
__u8 cqes;
__le16 maxcmd;
@ -274,7 +282,7 @@ struct nvme_id_ns {
__le16 nabsn;
__le16 nabo;
__le16 nabspf;
__u16 rsvd46;
__le16 noiob;
__u8 nvmcap[16];
__u8 rsvd64[40];
__u8 nguid[16];
@ -288,6 +296,7 @@ enum {
NVME_ID_CNS_NS = 0x00,
NVME_ID_CNS_CTRL = 0x01,
NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
NVME_ID_CNS_NS_DESC_LIST = 0x03,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
@ -314,6 +323,22 @@ enum {
NVME_NS_DPS_PI_TYPE3 = 3,
};
struct nvme_ns_id_desc {
__u8 nidt;
__u8 nidl;
__le16 reserved;
};
#define NVME_NIDT_EUI64_LEN 8
#define NVME_NIDT_NGUID_LEN 16
#define NVME_NIDT_UUID_LEN 16
enum {
NVME_NIDT_EUI64 = 0x01,
NVME_NIDT_NGUID = 0x02,
NVME_NIDT_UUID = 0x03,
};
struct nvme_smart_log {
__u8 critical_warning;
__u8 temperature[2];
@ -586,6 +611,11 @@ struct nvme_feat_auto_pst {
__le64 entries[32];
};
enum {
NVME_HOST_MEM_ENABLE = (1 << 0),
NVME_HOST_MEM_RETURN = (1 << 1),
};
/* Admin commands */
enum nvme_admin_opcode {
@ -658,6 +688,8 @@ struct nvme_identify {
__u32 rsvd11[5];
};
#define NVME_IDENTIFY_DATA_SIZE 4096
struct nvme_features {
__u8 opcode;
__u8 flags;
@ -667,7 +699,16 @@ struct nvme_features {
union nvme_data_ptr dptr;
__le32 fid;
__le32 dword11;
__u32 rsvd12[4];
__le32 dword12;
__le32 dword13;
__le32 dword14;
__le32 dword15;
};
struct nvme_host_mem_buf_desc {
__le64 addr;
__le32 size;
__u32 rsvd;
};
struct nvme_create_cq {
@ -843,7 +884,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
__u8 hostid[16];
uuid_t hostid;
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
@ -1050,4 +1091,8 @@ struct nvme_completion {
#define NVME_VS(major, minor, tertiary) \
(((major) << 16) | ((minor) << 8) | (tertiary))
#define NVME_MAJOR(ver) ((ver) >> 16)
#define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver) ((ver) & 0xff)
#endif /* _LINUX_NVME_H */
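
The new NVME_ID_CNS_NS_DESC_LIST identify page is a 4096-byte buffer
of nvme_ns_id_desc headers, each followed by nidl bytes of payload and
terminated by a zeroed descriptor. A sketch of walking it for the UUID
descriptor (hypothetical helper; the driver side is not part of this
header):

#include <linux/errno.h>
#include <linux/nvme.h>
#include <linux/uuid.h>

/* Hypothetical walk of an Identify CNS 0x03 buffer: pull out the
 * namespace UUID descriptor, if the controller reported one. */
static int example_find_ns_uuid(void *data, uuid_t *uuid)
{
	void *cur = data;

	while (cur + sizeof(struct nvme_ns_id_desc) <=
	       data + NVME_IDENTIFY_DATA_SIZE) {
		struct nvme_ns_id_desc *d = cur;

		if (!d->nidt)	/* a zero descriptor ends the list */
			break;
		if (d->nidt == NVME_NIDT_UUID &&
		    d->nidl == NVME_NIDT_UUID_LEN) {
			uuid_copy(uuid, cur + sizeof(*d));
			return 0;
		}
		cur += sizeof(*d) + d->nidl;
	}
	return -ENOENT;
}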


@ -105,7 +105,7 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
extern const u8 pci_acpi_dsm_uuid[];
extern const guid_t pci_acpi_dsm_guid;
#define DEVICE_LABEL_DSM 0x07
#define RESET_DELAY_DSM 0x08
#define FUNCTION_DELAY_DSM 0x09


@ -278,6 +278,8 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
size_t buflen, off_t skip);
/*
* Maximum number of entries that will be allocated in one piece, if


@ -18,29 +18,16 @@
#include <uapi/linux/uuid.h>
/*
* V1 (time-based) UUID definition [RFC 4122].
* - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
* increments since midnight 15th October 1582
* - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
* time
* - the clock sequence is a 14-bit counter to avoid duplicate times
*/
struct uuid_v1 {
__be32 time_low; /* low part of timestamp */
__be16 time_mid; /* mid part of timestamp */
__be16 time_hi_and_version; /* high part of timestamp and version */
#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL
#define UUID_TIMEHI_MASK 0x0fff
#define UUID_VERSION_TIME 0x1000 /* time-based UUID */
#define UUID_VERSION_NAME 0x3000 /* name-based UUID */
#define UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */
u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
#define UUID_CLOCKHI_MASK 0x3f
#define UUID_VARIANT_STD 0x80
u8 clock_seq_low; /* clock seq low */
u8 node[6]; /* spatially unique node ID (MAC addr) */
};
typedef struct {
__u8 b[16];
} uuid_t;
#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_t) \
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
((b) >> 8) & 0xff, (b) & 0xff, \
((c) >> 8) & 0xff, (c) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
/*
* The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
@ -48,27 +35,73 @@ struct uuid_v1 {
*/
#define UUID_STRING_LEN 36
static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
extern const guid_t guid_null;
extern const uuid_t uuid_null;
static inline bool guid_equal(const guid_t *u1, const guid_t *u2)
{
return memcmp(&u1, &u2, sizeof(uuid_le));
return memcmp(u1, u2, sizeof(guid_t)) == 0;
}
static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
static inline void guid_copy(guid_t *dst, const guid_t *src)
{
return memcmp(&u1, &u2, sizeof(uuid_be));
memcpy(dst, src, sizeof(guid_t));
}
static inline bool guid_is_null(guid_t *guid)
{
return guid_equal(guid, &guid_null);
}
static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
{
return memcmp(u1, u2, sizeof(uuid_t)) == 0;
}
static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
{
memcpy(dst, src, sizeof(uuid_t));
}
static inline bool uuid_is_null(uuid_t *uuid)
{
return uuid_equal(uuid, &uuid_null);
}
void generate_random_uuid(unsigned char uuid[16]);
extern void uuid_le_gen(uuid_le *u);
extern void uuid_be_gen(uuid_be *u);
extern void guid_gen(guid_t *u);
extern void uuid_gen(uuid_t *u);
bool __must_check uuid_is_valid(const char *uuid);
extern const u8 uuid_le_index[16];
extern const u8 uuid_be_index[16];
extern const u8 guid_index[16];
extern const u8 uuid_index[16];
int uuid_le_to_bin(const char *uuid, uuid_le *u);
int uuid_be_to_bin(const char *uuid, uuid_be *u);
int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
/* backwards compatibility, don't use in new code */
typedef uuid_t uuid_be;
#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
#define NULL_UUID_BE \
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00)
#define uuid_le_gen(u) guid_gen(u)
#define uuid_be_gen(u) uuid_gen(u)
#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
#define uuid_be_to_bin(uuid, u) uuid_parse(uuid, u)
static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
{
return memcmp(&u1, &u2, sizeof(guid_t));
}
static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
{
return memcmp(&u1, &u2, sizeof(uuid_t));
}
#endif
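
Taken together, the renamed types now carry a symmetric helper set:
*_equal(), *_copy(), *_is_null(), *_gen() and *_parse() exist for both
byte orders. A minimal usage sketch (the UUID string is an arbitrary
example value):

#include <linux/errno.h>
#include <linux/uuid.h>

static int example_uuid_helpers(void)
{
	uuid_t u;
	guid_t g;

	/* The same text parses into both representations; only the
	 * in-memory byte order differs. */
	if (uuid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &u) ||
	    guid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &g))
		return -EINVAL;

	if (uuid_is_null(&u) || guid_is_null(&g))
		return -EINVAL;	/* not expected for this input */
	return 0;
}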


@ -22,33 +22,21 @@
typedef struct {
__u8 b[16];
} uuid_le;
} guid_t;
typedef struct {
__u8 b[16];
} uuid_be;
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_le) \
#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((guid_t) \
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
(c) & 0xff, ((c) >> 8) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_be) \
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
((b) >> 8) & 0xff, (b) & 0xff, \
((c) >> 8) & 0xff, (c) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
/* backwards compatibility, don't use in new code */
typedef guid_t uuid_le;
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
#define NULL_UUID_LE \
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00)
#define NULL_UUID_BE \
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00)
0x00, 0x00, 0x00, 0x00)
#endif /* _UAPI_LINUX_UUID_H_ */


@ -1119,7 +1119,7 @@ static ssize_t bin_uuid(struct file *file,
/* Only supports reads */
if (oldval && oldlen) {
char buf[UUID_STRING_LEN + 1];
uuid_be uuid;
uuid_t uuid;
result = kernel_read(file, 0, buf, sizeof(buf) - 1);
if (result < 0)
@ -1128,7 +1128,7 @@ static ssize_t bin_uuid(struct file *file,
buf[result] = '\0';
result = -EIO;
if (uuid_be_to_bin(buf, &uuid))
if (uuid_parse(buf, &uuid))
goto out;
if (oldlen > 16)


@ -751,3 +751,38 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
/**
* sg_zero_buffer - Zero-out a part of a SG list
* @sgl: The SG list
* @nents: Number of SG entries
* @buflen: The number of bytes to zero out
* @skip: Number of bytes to skip before zeroing
*
* Returns the number of bytes zeroed.
**/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
size_t buflen, off_t skip)
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
sg_miter_start(&miter, sgl, nents, sg_flags);
if (!sg_miter_skip(&miter, skip))
return false;
while (offset < buflen && sg_miter_next(&miter)) {
unsigned int len;
len = min(miter.length, buflen - offset);
memset(miter.addr, 0, len);
offset += len;
}
sg_miter_stop(&miter);
return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
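
A caller can now blank a byte range inside a scatterlist without
mapping a bounce buffer first. A usage sketch (list construction
omitted; the offsets are arbitrary):

#include <linux/bug.h>
#include <linux/scatterlist.h>

/* Hypothetical usage: zero 512 bytes starting 64 bytes into the
 * list. A short return value means the list ended early. */
static void example_zero_range(struct scatterlist *sgl, unsigned int nents)
{
	size_t zeroed = sg_zero_buffer(sgl, nents, 512, 64);

	WARN_ON(zeroed != 512);
}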


@ -11,25 +11,25 @@
struct test_uuid_data {
const char *uuid;
uuid_le le;
uuid_be be;
guid_t le;
uuid_t be;
};
static const struct test_uuid_data test_uuid_test_data[] = {
{
.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
.le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
.be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
.le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
.be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
},
{
.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
.le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
.be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
.le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
.be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
},
{
.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
.le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
.be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
.le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
.be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
},
};
@ -61,28 +61,28 @@ static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
static void __init test_uuid_test(const struct test_uuid_data *data)
{
uuid_le le;
uuid_be be;
guid_t le;
uuid_t be;
char buf[48];
/* LE */
total_tests++;
if (uuid_le_to_bin(data->uuid, &le))
if (guid_parse(data->uuid, &le))
test_uuid_failed("conversion", false, false, data->uuid, NULL);
total_tests++;
if (uuid_le_cmp(data->le, le)) {
if (!guid_equal(&data->le, &le)) {
sprintf(buf, "%pUl", &le);
test_uuid_failed("cmp", false, false, data->uuid, buf);
}
/* BE */
total_tests++;
if (uuid_be_to_bin(data->uuid, &be))
if (uuid_parse(data->uuid, &be))
test_uuid_failed("conversion", false, true, data->uuid, NULL);
total_tests++;
if (uuid_be_cmp(data->be, be)) {
if (!uuid_equal(&data->be, &be)) {
sprintf(buf, "%pUb", &be);
test_uuid_failed("cmp", false, true, data->uuid, buf);
}
@ -90,17 +90,17 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
static void __init test_uuid_wrong(const char *data)
{
uuid_le le;
uuid_be be;
guid_t le;
uuid_t be;
/* LE */
total_tests++;
if (!uuid_le_to_bin(data, &le))
if (!guid_parse(data, &le))
test_uuid_failed("negative", true, false, data, NULL);
/* BE */
total_tests++;
if (!uuid_be_to_bin(data, &be))
if (!uuid_parse(data, &be))
test_uuid_failed("negative", true, true, data, NULL);
}


@ -21,10 +21,13 @@
#include <linux/uuid.h>
#include <linux/random.h>
const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
EXPORT_SYMBOL(uuid_le_index);
const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
EXPORT_SYMBOL(uuid_be_index);
const guid_t guid_null;
EXPORT_SYMBOL(guid_null);
const uuid_t uuid_null;
EXPORT_SYMBOL(uuid_null);
const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
/***************************************************************
* Random UUID interface
@ -53,21 +56,21 @@ static void __uuid_gen_common(__u8 b[16])
b[8] = (b[8] & 0x3F) | 0x80;
}
void uuid_le_gen(uuid_le *lu)
void guid_gen(guid_t *lu)
{
__uuid_gen_common(lu->b);
/* version 4 : random generation */
lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
}
EXPORT_SYMBOL_GPL(uuid_le_gen);
EXPORT_SYMBOL_GPL(guid_gen);
void uuid_be_gen(uuid_be *bu)
void uuid_gen(uuid_t *bu)
{
__uuid_gen_common(bu->b);
/* version 4 : random generation */
bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
}
EXPORT_SYMBOL_GPL(uuid_be_gen);
EXPORT_SYMBOL_GPL(uuid_gen);
/**
* uuid_is_valid - checks if a UUID string is valid
@ -97,7 +100,7 @@ bool uuid_is_valid(const char *uuid)
}
EXPORT_SYMBOL(uuid_is_valid);
static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16])
{
static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
unsigned int i;
@ -115,14 +118,14 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
return 0;
}
int uuid_le_to_bin(const char *uuid, uuid_le *u)
int guid_parse(const char *uuid, guid_t *u)
{
return __uuid_to_bin(uuid, u->b, uuid_le_index);
return __uuid_parse(uuid, u->b, guid_index);
}
EXPORT_SYMBOL(uuid_le_to_bin);
EXPORT_SYMBOL(guid_parse);
int uuid_be_to_bin(const char *uuid, uuid_be *u)
int uuid_parse(const char *uuid, uuid_t *u)
{
return __uuid_to_bin(uuid, u->b, uuid_be_index);
return __uuid_parse(uuid, u->b, uuid_index);
}
EXPORT_SYMBOL(uuid_be_to_bin);
EXPORT_SYMBOL(uuid_parse);
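
The two parsers differ only in the index table that decides where each
textual byte pair lands: guid_index field-swaps into little-endian
order, while uuid_index is the identity. A sketch with concrete bytes
(the string is an arbitrary well-formed example):

#include <linux/uuid.h>

static void example_parse_byte_order(void)
{
	guid_t g;
	uuid_t u;

	guid_parse("12345678-9abc-def0-1122-334455667788", &g);
	uuid_parse("12345678-9abc-def0-1122-334455667788", &u);

	/* g.b[0..3] = 78 56 34 12  (first field stored little-endian),
	 * u.b[0..3] = 12 34 56 78  (bytes kept in reading order). */
}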


@ -1308,14 +1308,14 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
char uuid[UUID_STRING_LEN + 1];
char *p = uuid;
int i;
const u8 *index = uuid_be_index;
const u8 *index = uuid_index;
bool uc = false;
switch (*(++fmt)) {
case 'L':
uc = true; /* fall-through */
case 'l':
index = uuid_le_index;
index = guid_index;
break;
case 'B':
uc = true;


@ -130,7 +130,7 @@ void __cleancache_init_shared_fs(struct super_block *sb)
int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
if (cleancache_ops) {
pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
if (pool_id < 0)
pool_id = CLEANCACHE_NO_POOL;
}


@ -75,6 +75,7 @@ static struct vfsmount *shm_mnt;
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@ -3761,6 +3762,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_TMPFS_POSIX_ACL
sb->s_flags |= MS_POSIXACL;
#endif
uuid_gen(&sb->s_uuid);
inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (!inode)


@ -164,7 +164,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
hmac_misc.mode = inode->i_mode;
crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
if (evm_hmac_attrs & EVM_ATTR_FSUUID)
crypto_shash_update(desc, inode->i_sb->s_uuid,
crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
sizeof(inode->i_sb->s_uuid));
crypto_shash_final(desc, digest);
}


@ -61,7 +61,7 @@ struct ima_rule_entry {
enum ima_hooks func;
int mask;
unsigned long fsmagic;
u8 fsuuid[16];
uuid_t fsuuid;
kuid_t uid;
kuid_t fowner;
bool (*uid_op)(kuid_t, kuid_t); /* Handlers for operators */
@ -244,7 +244,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
if ((rule->flags & IMA_FSUUID) &&
memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
!uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
return false;
if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
return false;
@ -711,14 +711,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
if (memchr_inv(entry->fsuuid, 0x00,
sizeof(entry->fsuuid))) {
if (!uuid_is_null(&entry->fsuuid)) {
result = -EINVAL;
break;
}
result = blk_part_pack_uuid(args[0].from,
entry->fsuuid);
result = uuid_parse(args[0].from, &entry->fsuuid);
if (!result)
entry->flags |= IMA_FSUUID;
break;
@ -1087,7 +1085,7 @@ int ima_policy_show(struct seq_file *m, void *v)
}
if (entry->flags & IMA_FSUUID) {
seq_printf(m, "fsuuid=%pU", entry->fsuuid);
seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
seq_puts(m, " ");
}


@ -21,8 +21,9 @@
#include "skl.h"
/* Unique identification for getting NHLT blobs */
static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
static guid_t osc_guid =
GUID_INIT(0xA69F886E, 0x6CEB, 0x4594,
0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53);
struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
{
@ -37,7 +38,7 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
return NULL;
}
obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
if (obj && obj->type == ACPI_TYPE_BUFFER) {
nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
nhlt_table = (struct nhlt_acpi_table *)


@ -370,7 +370,7 @@ acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
u64 rev, u64 func, union acpi_object *argv4)
{
union acpi_object *obj = ERR_PTR(-ENXIO);
@ -379,11 +379,11 @@ union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
rcu_read_lock();
ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
if (ops)
obj = ops->evaluate_dsm(handle, uuid, rev, func, argv4);
obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
rcu_read_unlock();
if (IS_ERR(obj))
return acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);


@ -1559,7 +1559,7 @@ static unsigned long nfit_ctl_handle;
union acpi_object *result;
static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
{
if (handle != &nfit_ctl_handle)
return ERR_PTR(-ENXIO);


@ -13,6 +13,7 @@
#ifndef __NFIT_TEST_H__
#define __NFIT_TEST_H__
#include <linux/list.h>
#include <linux/uuid.h>
#include <linux/ioport.h>
#include <linux/spinlock_types.h>
@ -36,7 +37,8 @@ typedef void *acpi_handle;
typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4);
const guid_t *guid, u64 rev, u64 func,
union acpi_object *argv4);
void __iomem *__wrap_ioremap_nocache(resource_size_t offset,
unsigned long size);
void __wrap_iounmap(volatile void __iomem *addr);