Merge tag 'libnvdimm-for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "The highlight this cycle is continuing integration fixes for PowerPC
  and some resulting optimizations.

  Summary:

   - Updates to better support vmalloc space restrictions on PowerPC
     platforms.

   - Cleanups to move common sysfs attributes to core 'struct
     device_type' objects.

   - Export the 'target_node' attribute (the effective numa node if pmem
     is marked online) for regions and namespaces.

   - Miscellaneous fixups and optimizations"

* tag 'libnvdimm-for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (21 commits)
  MAINTAINERS: Remove Keith from NVDIMM maintainers
  libnvdimm: Export the target_node attribute for regions and namespaces
  dax: Add numa_node to the default device-dax attributes
  libnvdimm: Simplify root read-only definition for the 'resource' attribute
  dax: Simplify root read-only definition for the 'resource' attribute
  dax: Create a dax device_type
  libnvdimm: Move nvdimm_bus_attribute_group to device_type
  libnvdimm: Move nvdimm_attribute_group to device_type
  libnvdimm: Move nd_mapping_attribute_group to device_type
  libnvdimm: Move nd_region_attribute_group to device_type
  libnvdimm: Move nd_numa_attribute_group to device_type
  libnvdimm: Move nd_device_attribute_group to device_type
  libnvdimm: Move region attribute group definition
  libnvdimm: Move attribute groups to device type
  libnvdimm: Remove prototypes for nonexistent functions
  libnvdimm/btt: fix variable 'rc' set but not used
  libnvdimm/pmem: Delete include of nd-core.h
  libnvdimm/namespace: Differentiate between probe mapping and runtime mapping
  libnvdimm/pfn_dev: Don't clear device memmap area during generic namespace probe
  libnvdimm: Trivial comment fix
  ...
commit d10032dd53
Merged by Linus Torvalds, 2019-12-01 18:43:25 -08:00
22 changed files with 387 additions and 354 deletions
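
The unifying refactor in this series moves sysfs attribute groups off of
per-device 'dev->groups' assignments and onto each device's 'struct
device_type', so the groups are declared once next to the type and the
creation sites no longer wire them up by hand. A minimal sketch of the
before/after shape (the 'foo' names are hypothetical, not identifiers from
this series):

        /* before: every creation site assigns groups itself */
        dev->release = foo_release;
        dev->groups = foo_attribute_groups;

        /* after: declared once on the type... */
        static const struct device_type foo_device_type = {
                .name    = "foo",
                .release = foo_release,
                .groups  = foo_attribute_groups,
        };

        /* ...assigned at creation, and usable for type checks */
        dev->type = &foo_device_type;

        bool is_foo(struct device *dev)
        {
                return dev ? dev->type == &foo_device_type : false;
        }

As a side effect the group definitions can become static and const, a set of
EXPORT_SYMBOL_GPL() entries disappears, and release-function matching (as in
the old is_nvdimm_bus()) is replaced by a type comparison.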


@@ -4911,7 +4911,6 @@ F: include/trace/events/fs_dax.h
DEVICE DIRECT ACCESS (DAX)
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Keith Busch <keith.busch@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
S: Supported
@@ -9439,7 +9438,6 @@ LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Keith Busch <keith.busch@intel.com>
M: Ira Weiny <ira.weiny@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/


@@ -284,25 +284,6 @@ int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
static const struct attribute_group *region_attr_groups[] = {
&nd_region_attribute_group,
&nd_device_attribute_group,
&nd_mapping_attribute_group,
&nd_numa_attribute_group,
NULL,
};
static const struct attribute_group *bus_attr_groups[] = {
&nvdimm_bus_attribute_group,
NULL,
};
static const struct attribute_group *papr_scm_dimm_groups[] = {
&nvdimm_attribute_group,
&nd_device_attribute_group,
NULL,
};
static inline int papr_scm_node(int node)
{
int min_dist = INT_MAX, dist;
@@ -333,7 +314,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
p->bus_desc.of_node = p->pdev->dev.of_node;
p->bus_desc.attr_groups = bus_attr_groups;
p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
if (!p->bus_desc.provider_name)
@@ -348,8 +328,8 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
dimm_flags = 0;
set_bit(NDD_ALIASING, &dimm_flags);
p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags,
PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
if (!p->nvdimm) {
dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
goto err;
@@ -366,7 +346,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
target_nid = dev_to_node(&p->pdev->dev);
online_nid = papr_scm_node(target_nid);
ndr_desc.numa_node = online_nid;
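
With the common dimm attributes now attached via the core's nvdimm
device_type, a provider only passes a groups array to nvdimm_create() when it
has provider-specific attributes to add; papr_scm has none left, hence the
NULL above. A sketch of the remaining provider-side pattern (the 'my_*' names
are hypothetical; compare the nfit arrays in the next diff):

        static const struct attribute_group *my_dimm_groups[] = {
                &my_dimm_attribute_group,       /* provider-specific only */
                NULL,
        };

        nvdimm = nvdimm_create(bus, priv, my_dimm_groups, dimm_flags,
                        cmd_mask, 0, NULL);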


@@ -1404,7 +1404,6 @@ static const struct attribute_group acpi_nfit_attribute_group = {
};
static const struct attribute_group *acpi_nfit_attribute_groups[] = {
&nvdimm_bus_attribute_group,
&acpi_nfit_attribute_group,
NULL,
};
@@ -1698,8 +1697,6 @@ static const struct attribute_group acpi_nfit_dimm_attribute_group = {
};
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
&nvdimm_attribute_group,
&nd_device_attribute_group,
&acpi_nfit_dimm_attribute_group,
NULL,
};
@@ -2197,10 +2194,6 @@ static const struct attribute_group acpi_nfit_region_attribute_group = {
};
static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
&nd_region_attribute_group,
&nd_mapping_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
&acpi_nfit_region_attribute_group,
NULL,
};


@@ -309,7 +309,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", dev_dax_resource(dev_dax));
}
static DEVICE_ATTR_RO(resource);
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -322,6 +322,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -329,8 +336,8 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
return 0;
if (a == &dev_attr_resource.attr)
return 0400;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
return a->mode;
}
@@ -339,6 +346,7 @@ static struct attribute *dev_dax_attributes[] = {
&dev_attr_size.attr,
&dev_attr_target_node.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
NULL,
};
@@ -373,6 +381,11 @@ static void dev_dax_release(struct device *dev)
kfree(dev_dax);
}
static const struct device_type dev_dax_type = {
.release = dev_dax_release,
.groups = dax_attribute_groups,
};
static void unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -430,8 +443,7 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
else
dev->class = dax_class;
dev->parent = parent;
dev->groups = dax_attribute_groups;
dev->release = dev_dax_release;
dev->type = &dev_dax_type;
dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = device_add(dev);
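
The net effect in device-dax is that 'numa_node' joins 'size', 'target_node',
and 'resource' as attributes supplied by the new dev_dax_type, with
'numa_node' hidden outright on CONFIG_NUMA=n kernels instead of reporting a
stub value. A small illustrative userspace consumer (the dax0.0 path is an
assumption, not something defined by this series):

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/bus/dax/devices/dax0.0/numa_node", "r");
                int node;

                if (!f)
                        return 1;       /* attribute hidden or device absent */
                if (fscanf(f, "%d", &node) == 1)
                        printf("dax0.0 is on node %d\n", node);
                fclose(f);
                return 0;
        }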


@@ -25,20 +25,20 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return ERR_CAST(ndns);
nsio = to_nd_namespace_io(&ndns->dev);
/* parse the 'pfn' info block via ->rw_bytes */
rc = devm_nsio_enable(dev, nsio);
rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (rc)
return ERR_PTR(rc);
rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
if (rc)
return ERR_PTR(rc);
devm_nsio_disable(dev, nsio);
devm_namespace_disable(dev, ndns);
/* reserve the metadata area, device-dax will reserve the data */
pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
nsio = to_nd_namespace_io(&ndns->dev);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve metadata\n");
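
This hunk is the probe-time discipline that makes PowerPC's vmalloc-space
restrictions tractable: instead of mapping an entire namespace just to parse
an info block, probe maps only the page-aligned info-block reserve, parses
the superblock through ->rw_bytes, and drops the mapping again. Condensed
from the flow above (not new code, just the shape of it):

        /* map just the info block so ->rw_bytes can parse it */
        rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
        if (rc)
                return ERR_PTR(rc);

        rc = nvdimm_setup_pfn(nd_pfn, &pgmap); /* read the pfn superblock */
        if (rc)
                return ERR_PTR(rc);

        /* probe no longer needs raw access; the attach path re-enables
         * the namespace at whatever size it actually requires */
        devm_namespace_disable(dev, ndns);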


@@ -1261,11 +1261,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
int rc;
/* Media error - set the e_flag */
rc = btt_map_write(arena, premap, postmap, 0, 1,
NVDIMM_IO_ATOMIC);
if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
dev_warn_ratelimited(to_dev(arena),
"Error persistently tracking bad blocks at %#x\n",
premap);
goto out_rtt;
}
@@ -1674,7 +1674,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
struct nd_region *nd_region;
struct btt_sb *btt_sb;
struct btt *btt;
size_t rawsize;
size_t size, rawsize;
int rc;
if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
@@ -1685,6 +1686,11 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
if (!btt_sb)
return -ENOMEM;
size = nvdimm_namespace_capacity(ndns);
rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
if (rc)
return rc;
/*
* If this returns < 0, that is ok as it just means there wasn't
* an existing BTT, and we're creating a new one. We still need to
@@ -1693,7 +1699,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
*/
nd_btt_version(nd_btt, ndns, btt_sb);
rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
rawsize = size - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
dev_name(&ndns->dev),


@@ -25,17 +25,6 @@ static void nd_btt_release(struct device *dev)
kfree(nd_btt);
}
static struct device_type nd_btt_device_type = {
.name = "nd_btt",
.release = nd_btt_release,
};
bool is_nd_btt(struct device *dev)
{
return dev->type == &nd_btt_device_type;
}
EXPORT_SYMBOL(is_nd_btt);
struct nd_btt *to_nd_btt(struct device *dev)
{
struct nd_btt *nd_btt = container_of(dev, struct nd_btt, dev);
@@ -178,6 +167,18 @@ static const struct attribute_group *nd_btt_attribute_groups[] = {
NULL,
};
static const struct device_type nd_btt_device_type = {
.name = "nd_btt",
.release = nd_btt_release,
.groups = nd_btt_attribute_groups,
};
bool is_nd_btt(struct device *dev)
{
return dev->type == &nd_btt_device_type;
}
EXPORT_SYMBOL(is_nd_btt);
static struct device *__nd_btt_create(struct nd_region *nd_region,
unsigned long lbasize, u8 *uuid,
struct nd_namespace_common *ndns)
@@ -204,7 +205,6 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
dev->parent = &nd_region->dev;
dev->type = &nd_btt_device_type;
dev->groups = nd_btt_attribute_groups;
device_initialize(&nd_btt->dev);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",


@@ -300,9 +300,14 @@ static void nvdimm_bus_release(struct device *dev)
kfree(nvdimm_bus);
}
static const struct device_type nvdimm_bus_dev_type = {
.release = nvdimm_bus_release,
.groups = nvdimm_bus_attribute_groups,
};
bool is_nvdimm_bus(struct device *dev)
{
return dev->release == nvdimm_bus_release;
return dev->type == &nvdimm_bus_dev_type;
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
@@ -355,7 +360,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
badrange_init(&nvdimm_bus->badrange);
nvdimm_bus->nd_desc = nd_desc;
nvdimm_bus->dev.parent = parent;
nvdimm_bus->dev.release = nvdimm_bus_release;
nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
nvdimm_bus->dev.groups = nd_desc->attr_groups;
nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
@@ -669,10 +674,9 @@ static struct attribute *nd_device_attributes[] = {
/*
* nd_device_attribute_group - generic attributes for all devices on an nd bus
*/
struct attribute_group nd_device_attribute_group = {
const struct attribute_group nd_device_attribute_group = {
.attrs = nd_device_attributes,
};
EXPORT_SYMBOL_GPL(nd_device_attribute_group);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -681,28 +685,56 @@ static ssize_t numa_node_show(struct device *dev,
}
static DEVICE_ATTR_RO(numa_node);
static int nvdimm_dev_to_target_node(struct device *dev)
{
struct device *parent = dev->parent;
struct nd_region *nd_region = NULL;
if (is_nd_region(dev))
nd_region = to_nd_region(dev);
else if (parent && is_nd_region(parent))
nd_region = to_nd_region(parent);
if (!nd_region)
return NUMA_NO_NODE;
return nd_region->target_node;
}
static ssize_t target_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
}
static DEVICE_ATTR_RO(target_node);
static struct attribute *nd_numa_attributes[] = {
&dev_attr_numa_node.attr,
&dev_attr_target_node.attr,
NULL,
};
static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
if (!IS_ENABLED(CONFIG_NUMA))
return 0;
if (a == &dev_attr_target_node.attr &&
nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
return 0;
return a->mode;
}
/*
* nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
*/
struct attribute_group nd_numa_attribute_group = {
const struct attribute_group nd_numa_attribute_group = {
.attrs = nd_numa_attributes,
.is_visible = nd_numa_attr_visible,
};
EXPORT_SYMBOL_GPL(nd_numa_attribute_group);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
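
Semantically, 'numa_node' reports where the device sits in the topology
today, while 'target_node' reports the node the pmem capacity would become
if it were onlined as system RAM; the two differ while that memory is still
offline. A hypothetical consumer (the pick_node() helper is assumed, not
part of this series) that prefers the eventual home node:

        int pick_node(struct device *dev, struct nd_region *nd_region)
        {
                int node = nd_region->target_node;

                if (node == NUMA_NO_NODE)       /* no hotplug target known */
                        node = dev_to_node(dev);
                return node;
        }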


@@ -300,13 +300,14 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
return rc;
}
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
resource_size_t size)
{
struct resource *res = &nsio->res;
struct nd_namespace_common *ndns = &nsio->common;
nsio->size = resource_size(res);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
nsio->size = size;
if (!devm_request_mem_region(dev, res->start, size,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
@@ -318,12 +319,10 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
&nsio->res);
nsio->addr = devm_memremap(dev, res->start, resource_size(res),
ARCH_MEMREMAP_PMEM);
nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
@@ -331,6 +330,5 @@ void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
devm_memunmap(dev, nsio->addr);
devm_exit_badblocks(dev, &nsio->bb);
devm_release_mem_region(dev, res->start, resource_size(res));
devm_release_mem_region(dev, res->start, nsio->size);
}
EXPORT_SYMBOL_GPL(devm_nsio_disable);


@@ -385,10 +385,14 @@ static struct attribute *nvdimm_bus_attributes[] = {
NULL,
};
struct attribute_group nvdimm_bus_attribute_group = {
static const struct attribute_group nvdimm_bus_attribute_group = {
.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
&nvdimm_bus_attribute_group,
NULL,
};
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{


@@ -23,17 +23,6 @@ static void nd_dax_release(struct device *dev)
kfree(nd_dax);
}
static struct device_type nd_dax_device_type = {
.name = "nd_dax",
.release = nd_dax_release,
};
bool is_nd_dax(struct device *dev)
{
return dev ? dev->type == &nd_dax_device_type : false;
}
EXPORT_SYMBOL(is_nd_dax);
struct nd_dax *to_nd_dax(struct device *dev)
{
struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
@@ -43,13 +32,18 @@ struct nd_dax *to_nd_dax(struct device *dev)
}
EXPORT_SYMBOL(to_nd_dax);
static const struct attribute_group *nd_dax_attribute_groups[] = {
&nd_pfn_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
NULL,
static const struct device_type nd_dax_device_type = {
.name = "nd_dax",
.release = nd_dax_release,
.groups = nd_pfn_attribute_groups,
};
bool is_nd_dax(struct device *dev)
{
return dev ? dev->type == &nd_dax_device_type : false;
}
EXPORT_SYMBOL(is_nd_dax);
static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
{
struct nd_pfn *nd_pfn;
@@ -69,7 +63,6 @@ static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
dev = &nd_pfn->dev;
dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
dev->groups = nd_dax_attribute_groups;
dev->type = &nd_dax_device_type;
dev->parent = &nd_region->dev;


@@ -202,16 +202,6 @@ static void nvdimm_release(struct device *dev)
kfree(nvdimm);
}
static struct device_type nvdimm_device_type = {
.name = "nvdimm",
.release = nvdimm_release,
};
bool is_nvdimm(struct device *dev)
{
return dev->type == &nvdimm_device_type;
}
struct nvdimm *to_nvdimm(struct device *dev)
{
struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
@@ -450,11 +440,27 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
}
struct attribute_group nvdimm_attribute_group = {
static const struct attribute_group nvdimm_attribute_group = {
.attrs = nvdimm_attributes,
.is_visible = nvdimm_visible,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
static const struct attribute_group *nvdimm_attribute_groups[] = {
&nd_device_attribute_group,
&nvdimm_attribute_group,
NULL,
};
static const struct device_type nvdimm_device_type = {
.name = "nvdimm",
.release = nvdimm_release,
.groups = nvdimm_attribute_groups,
};
bool is_nvdimm(struct device *dev)
{
return dev->type == &nvdimm_device_type;
}
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,


@@ -8,17 +8,6 @@
#include <linux/libnvdimm.h>
#include <linux/module.h>
static const struct attribute_group *e820_pmem_attribute_groups[] = {
&nvdimm_bus_attribute_group,
NULL,
};
static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
&nd_region_attribute_group,
&nd_device_attribute_group,
NULL,
};
static int e820_pmem_remove(struct platform_device *pdev)
{
struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
@@ -46,7 +35,6 @@ static int e820_register_one(struct resource *res, void *data)
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = res;
ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
ndr_desc.numa_node = e820_range_to_nid(res->start);
ndr_desc.target_node = ndr_desc.numa_node;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
@@ -62,7 +50,6 @@ static int e820_pmem_probe(struct platform_device *pdev)
struct nvdimm_bus *nvdimm_bus;
int rc = -ENXIO;
nd_desc.attr_groups = e820_pmem_attribute_groups;
nd_desc.provider_name = "e820";
nd_desc.module = THIS_MODULE;
nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);


@@ -44,35 +44,9 @@ static void namespace_blk_release(struct device *dev)
kfree(nsblk);
}
static const struct device_type namespace_io_device_type = {
.name = "nd_namespace_io",
.release = namespace_io_release,
};
static const struct device_type namespace_pmem_device_type = {
.name = "nd_namespace_pmem",
.release = namespace_pmem_release,
};
static const struct device_type namespace_blk_device_type = {
.name = "nd_namespace_blk",
.release = namespace_blk_release,
};
static bool is_namespace_pmem(const struct device *dev)
{
return dev ? dev->type == &namespace_pmem_device_type : false;
}
static bool is_namespace_blk(const struct device *dev)
{
return dev ? dev->type == &namespace_blk_device_type : false;
}
static bool is_namespace_io(const struct device *dev)
{
return dev ? dev->type == &namespace_io_device_type : false;
}
static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_blk(const struct device *dev);
static bool is_namespace_io(const struct device *dev);
static int is_uuid_busy(struct device *dev, void *data)
{
@@ -1329,7 +1303,7 @@ static ssize_t resource_show(struct device *dev,
return -ENXIO;
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
@@ -1510,16 +1484,20 @@ static ssize_t holder_show(struct device *dev,
}
static DEVICE_ATTR_RO(holder);
static ssize_t __holder_class_store(struct device *dev, const char *buf)
static int __holder_class_store(struct device *dev, const char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
if (dev->driver || ndns->claim)
return -EBUSY;
if (sysfs_streq(buf, "btt"))
ndns->claim_class = btt_claim_class(dev);
else if (sysfs_streq(buf, "pfn"))
if (sysfs_streq(buf, "btt")) {
int rc = btt_claim_class(dev);
if (rc < NVDIMM_CCLASS_NONE)
return rc;
ndns->claim_class = rc;
} else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
@@ -1528,10 +1506,6 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
else
return -EINVAL;
/* btt_claim_class() could've returned an error */
if (ndns->claim_class < 0)
return ndns->claim_class;
return 0;
}
@@ -1539,7 +1513,7 @@ static ssize_t holder_class_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
int rc;
nd_device_lock(dev);
nvdimm_bus_lock(dev);
@@ -1547,7 +1521,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1645,11 +1619,8 @@ static umode_t namespace_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
if (a == &dev_attr_resource.attr) {
if (is_namespace_blk(dev))
return 0;
return 0400;
}
if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
return 0;
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
if (a == &dev_attr_size.attr)
@@ -1680,6 +1651,39 @@ static const struct attribute_group *nd_namespace_attribute_groups[] = {
NULL,
};
static const struct device_type namespace_io_device_type = {
.name = "nd_namespace_io",
.release = namespace_io_release,
.groups = nd_namespace_attribute_groups,
};
static const struct device_type namespace_pmem_device_type = {
.name = "nd_namespace_pmem",
.release = namespace_pmem_release,
.groups = nd_namespace_attribute_groups,
};
static const struct device_type namespace_blk_device_type = {
.name = "nd_namespace_blk",
.release = namespace_blk_release,
.groups = nd_namespace_attribute_groups,
};
static bool is_namespace_pmem(const struct device *dev)
{
return dev ? dev->type == &namespace_pmem_device_type : false;
}
static bool is_namespace_blk(const struct device *dev)
{
return dev ? dev->type == &namespace_blk_device_type : false;
}
static bool is_namespace_io(const struct device *dev)
{
return dev ? dev->type == &namespace_io_device_type : false;
}
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
@@ -1759,6 +1763,23 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
resource_size_t size)
{
if (is_namespace_blk(&ndns->dev))
return 0;
return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);
void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
if (is_namespace_blk(&ndns->dev))
return;
devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);
static struct device **create_namespace_io(struct nd_region *nd_region)
{
struct nd_namespace_io *nsio;
@@ -2078,7 +2099,6 @@ static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
dev->parent = &nd_region->dev;
dev->groups = nd_namespace_attribute_groups;
return &nsblk->common.dev;
}
@@ -2109,7 +2129,6 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
dev->groups = nd_namespace_attribute_groups;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
return dev;
@@ -2608,7 +2627,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
if (id < 0)
break;
dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
dev->groups = nd_namespace_attribute_groups;
nd_device_register(dev);
}
if (i)


@@ -123,11 +123,7 @@ void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
int nvdimm_bus_register_dimms(struct nvdimm_bus *nvdimm_bus);
int nvdimm_bus_register_regions(struct nvdimm_bus *nvdimm_bus);
int nvdimm_bus_init_interleave_sets(struct nvdimm_bus *nvdimm_bus);
void __nd_device_register(struct device *dev);
int nd_match_dimm(struct device *dev, void *data);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
@@ -170,6 +166,23 @@ ssize_t nd_namespace_store(struct device *dev,
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);
#if IS_ENABLED(CONFIG_ND_CLAIM)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
resource_size_t size);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int devm_nsio_enable(struct device *dev,
struct nd_namespace_io *nsio, resource_size_t size)
{
return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
struct nd_namespace_io *nsio)
{
}
#endif
#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;

View File

@@ -212,6 +212,11 @@ struct nd_dax {
struct nd_pfn nd_pfn;
};
static inline u32 nd_info_block_reserve(void)
{
return ALIGN(SZ_8K, PAGE_SIZE);
}
enum nd_async_mode {
ND_SYNC,
ND_ASYNC,
@@ -234,6 +239,9 @@ int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
extern const struct attribute_group nd_device_attribute_group;
extern const struct attribute_group nd_numa_attribute_group;
extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
@@ -297,7 +305,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
struct nd_namespace_common *ndns)
@@ -370,29 +378,20 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
resource_size_t size);
void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
struct dev_pagemap *pgmap)
{
return -ENXIO;
}
static inline int devm_nsio_enable(struct device *dev,
struct nd_namespace_io *nsio)
{
return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
struct nd_namespace_io *nsio)
{
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
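
nd_info_block_reserve() moving to nd.h lets the pmem, pfn, and dax probe
paths share one definition of "how much to map at probe time": the 8K info
block rounded up to a page. The rounding is what makes this a PowerPC story,
since 64K pages turn the minimum probe mapping into one full page. A quick
standalone illustration of the same arithmetic (ALIGN_UP is a stand-in for
the kernel's ALIGN()):

        #include <stdio.h>

        #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                printf("%u\n", ALIGN_UP(8192u, 4096u));  /* 8192: 4K pages */
                printf("%u\n", ALIGN_UP(8192u, 65536u)); /* 65536: 64K pages */
                return 0;
        }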


@@ -9,17 +9,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
static const struct attribute_group *region_attr_groups[] = {
&nd_region_attribute_group,
&nd_device_attribute_group,
NULL,
};
static const struct attribute_group *bus_attr_groups[] = {
&nvdimm_bus_attribute_group,
NULL,
};
struct of_pmem_private {
struct nvdimm_bus_descriptor bus_desc;
struct nvdimm_bus *bus;
@@ -41,7 +30,6 @@ static int of_pmem_region_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
priv->bus_desc.attr_groups = bus_attr_groups;
priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
priv->bus_desc.module = THIS_MODULE;
priv->bus_desc.of_node = np;
@@ -66,7 +54,6 @@ static int of_pmem_region_probe(struct platform_device *pdev)
* structures so passing a stack pointer is fine.
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.target_node = ndr_desc.numa_node;
ndr_desc.res = &pdev->resource[i];


@@ -26,17 +26,6 @@ static void nd_pfn_release(struct device *dev)
kfree(nd_pfn);
}
static struct device_type nd_pfn_device_type = {
.name = "nd_pfn",
.release = nd_pfn_release,
};
bool is_nd_pfn(struct device *dev)
{
return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
struct nd_pfn *to_nd_pfn(struct device *dev)
{
struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);
@@ -229,7 +218,7 @@ static ssize_t resource_show(struct device *dev,
return rc;
}
static DEVICE_ATTR_RO(resource);
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -280,25 +269,29 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
{
if (a == &dev_attr_resource.attr)
return 0400;
return a->mode;
}
struct attribute_group nd_pfn_attribute_group = {
static struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
.is_visible = pfn_visible,
};
static const struct attribute_group *nd_pfn_attribute_groups[] = {
const struct attribute_group *nd_pfn_attribute_groups[] = {
&nd_pfn_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
NULL,
};
static const struct device_type nd_pfn_device_type = {
.name = "nd_pfn",
.release = nd_pfn_release,
.groups = nd_pfn_attribute_groups,
};
bool is_nd_pfn(struct device *dev)
{
return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
@@ -337,7 +330,6 @@ static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
dev = &nd_pfn->dev;
dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
dev->groups = nd_pfn_attribute_groups;
dev->type = &nd_pfn_device_type;
dev->parent = &nd_region->dev;
@@ -382,6 +374,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
/*
* re-enable the namespace with correct size so that we can access
* the device memmap area.
*/
devm_namespace_disable(&nd_pfn->dev, ndns);
rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
if (rc)
return rc;
do {
unsigned long zero_len;
u64 nsoff;
@@ -591,7 +592,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
return nd_pfn_clear_memmap_errors(nd_pfn);
return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
@@ -635,11 +636,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nd_pfn_probe);
static u32 info_block_reserve(void)
{
return ALIGN(SZ_8K, PAGE_SIZE);
}
/*
* We hotplug memory at sub-section granularity, pad the reserved area
* from the previous section base to the namespace base address.
@@ -653,7 +649,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
@@ -668,7 +664,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
u32 reserve = info_block_reserve();
u32 reserve = nd_info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;
@@ -729,6 +725,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = PFN_SIG;
rc = nd_pfn_validate(nd_pfn, sig);
if (rc == 0)
return nd_pfn_clear_memmap_errors(nd_pfn);
if (rc != -ENODEV)
return rc;
@@ -796,6 +794,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
rc = nd_pfn_clear_memmap_errors(nd_pfn);
if (rc)
return rc;
return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}


@@ -28,7 +28,6 @@
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"
static struct device *to_dev(struct pmem_device *pmem)
{
@@ -372,6 +371,10 @@ static int pmem_attach_disk(struct device *dev,
if (!pmem)
return -ENOMEM;
rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (rc)
return rc;
/* while nsio_rw_bytes is active, parse a pfn info block if present */
if (is_nd_pfn(dev)) {
nd_pfn = to_nd_pfn(dev);
@@ -381,7 +384,7 @@ static int pmem_attach_disk(struct device *dev,
}
/* we're attaching a block device, disable raw namespace access */
devm_nsio_disable(dev, nsio);
devm_namespace_disable(dev, ndns);
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
@@ -497,15 +500,16 @@ static int nd_pmem_probe(struct device *dev)
if (IS_ERR(ndns))
return PTR_ERR(ndns);
if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
return -ENXIO;
if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
if (is_nd_pfn(dev))
return pmem_attach_disk(dev, ndns);
ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (ret)
return ret;
ret = nd_btt_probe(dev, ndns);
if (ret == 0)
return -ENXIO;
@@ -532,6 +536,10 @@ static int nd_pmem_probe(struct device *dev)
return -ENXIO;
else if (ret == -EOPNOTSUPP)
return ret;
/* probe complete, attach handles namespace enabling */
devm_namespace_disable(dev, ndns);
return pmem_attach_disk(dev, ndns);
}


@@ -140,36 +140,6 @@ static void nd_region_release(struct device *dev)
kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
.name = "nd_blk",
.release = nd_region_release,
};
static struct device_type nd_pmem_device_type = {
.name = "nd_pmem",
.release = nd_region_release,
};
static struct device_type nd_volatile_device_type = {
.name = "nd_volatile",
.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
return dev ? dev->type == &nd_pmem_device_type : false;
}
bool is_nd_blk(struct device *dev)
{
return dev ? dev->type == &nd_blk_device_type : false;
}
bool is_nd_volatile(struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -583,7 +553,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t persistence_domain_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -635,12 +605,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
return 0;
if (a == &dev_attr_resource.attr) {
if (is_memory(dev))
return 0400;
else
return 0;
}
if (a == &dev_attr_resource.attr && !is_memory(dev))
return 0;
if (a == &dev_attr_deep_flush.attr) {
int has_flush = nvdimm_has_flush(nd_region);
@@ -674,80 +640,6 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
}
struct attribute_group nd_region_attribute_group = {
.attrs = nd_region_attributes,
.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
struct nd_namespace_index *nsindex)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
if (!nd_set)
return 0;
if (nsindex && __le16_to_cpu(nsindex->major) == 1
&& __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
}
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
if (nd_set)
return nd_set->altcookie;
return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
struct nd_label_ent *label_ent, *e;
lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
list_del(&label_ent->list);
kfree(label_ent);
}
}
/*
* When a namespace is activated create new seeds for the next
* namespace, or namespace-personality to be configured.
*/
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev) {
nd_region_create_ns_seed(nd_region);
} else if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
if (nd_region->ns_seed == &nd_btt->ndns->dev)
nd_region_create_ns_seed(nd_region);
} else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
if (nd_region->pfn_seed == dev)
nd_region_create_pfn_seed(nd_region);
if (nd_region->ns_seed == &nd_pfn->ndns->dev)
nd_region_create_ns_seed(nd_region);
} else if (is_nd_dax(dev)) {
struct nd_dax *nd_dax = to_nd_dax(dev);
if (nd_region->dax_seed == dev)
nd_region_create_dax_seed(nd_region);
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
}
nvdimm_bus_unlock(dev);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -855,11 +747,124 @@ static struct attribute *mapping_attributes[] = {
NULL,
};
struct attribute_group nd_mapping_attribute_group = {
static const struct attribute_group nd_mapping_attribute_group = {
.is_visible = mapping_visible,
.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
static const struct attribute_group nd_region_attribute_group = {
.attrs = nd_region_attributes,
.is_visible = region_visible,
};
static const struct attribute_group *nd_region_attribute_groups[] = {
&nd_device_attribute_group,
&nd_region_attribute_group,
&nd_numa_attribute_group,
&nd_mapping_attribute_group,
NULL,
};
static const struct device_type nd_blk_device_type = {
.name = "nd_blk",
.release = nd_region_release,
.groups = nd_region_attribute_groups,
};
static const struct device_type nd_pmem_device_type = {
.name = "nd_pmem",
.release = nd_region_release,
.groups = nd_region_attribute_groups,
};
static const struct device_type nd_volatile_device_type = {
.name = "nd_volatile",
.release = nd_region_release,
.groups = nd_region_attribute_groups,
};
bool is_nd_pmem(struct device *dev)
{
return dev ? dev->type == &nd_pmem_device_type : false;
}
bool is_nd_blk(struct device *dev)
{
return dev ? dev->type == &nd_blk_device_type : false;
}
bool is_nd_volatile(struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
}
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
struct nd_namespace_index *nsindex)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
if (!nd_set)
return 0;
if (nsindex && __le16_to_cpu(nsindex->major) == 1
&& __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
}
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
if (nd_set)
return nd_set->altcookie;
return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
struct nd_label_ent *label_ent, *e;
lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
list_del(&label_ent->list);
kfree(label_ent);
}
}
/*
* When a namespace is activated create new seeds for the next
* namespace, or namespace-personality to be configured.
*/
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev) {
nd_region_create_ns_seed(nd_region);
} else if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
if (nd_region->ns_seed == &nd_btt->ndns->dev)
nd_region_create_ns_seed(nd_region);
} else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
if (nd_region->pfn_seed == dev)
nd_region_create_pfn_seed(nd_region);
if (nd_region->ns_seed == &nd_pfn->ndns->dev)
nd_region_create_ns_seed(nd_region);
} else if (is_nd_dax(dev)) {
struct nd_dax *nd_dax = to_nd_dax(dev);
if (nd_region->dax_seed == dev)
nd_region_create_dax_seed(nd_region);
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
}
nvdimm_bus_unlock(dev);
}
int nd_blk_region_init(struct nd_region *nd_region)
{
@@ -931,8 +936,8 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
EXPORT_SYMBOL(nd_region_release_lane);
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc, struct device_type *dev_type,
const char *caller)
struct nd_region_desc *ndr_desc,
const struct device_type *dev_type, const char *caller)
{
struct nd_region *nd_region;
struct device *dev;


@@ -65,13 +65,6 @@ enum {
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
extern struct attribute_group nvdimm_bus_attribute_group;
extern struct attribute_group nvdimm_attribute_group;
extern struct attribute_group nd_device_attribute_group;
extern struct attribute_group nd_numa_attribute_group;
extern struct attribute_group nd_region_attribute_group;
extern struct attribute_group nd_mapping_attribute_group;
struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,


@@ -147,7 +147,7 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
/**
* nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
* @ndns: device to read
* @ndns: device to write
* @offset: namespace-relative starting offset
* @buf: buffer to drain
* @size: transfer length