mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 13:53:33 +00:00
debdce20c4
For the NUMA nodes that are not created by SRAT, no memory_target is allocated and the node is not managed by the HMAT_REPORTING code. Therefore the hmat_callback() memory hotplug notifier will exit early on those NUMA nodes. The CXL memory hotplug notifier will need to call node_set_perf_attrs() directly in order to set up the access sysfs attributes. In acpi_numa_init(), the last proximity domain (pxm) id created by SRAT is stored. Add a helper function acpi_node_backed_by_real_pxm() in order to check whether a NUMA node id is defined by SRAT or created by CFMWS. The node_set_perf_attrs() symbol is exported to allow updating the perf attributes for a node. The sysfs path of /sys/devices/system/node/nodeX/access0/initiators/* is created by node_set_perf_attrs() for the various attributes, where nodeX is matched to the NUMA node of the CXL region. Cc: Rafael J. Wysocki <rafael@kernel.org> Reviewed-by: Alison Schofield <alison.schofield@intel.com> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Signed-off-by: Dave Jiang <dave.jiang@intel.com> Link: https://lore.kernel.org/r/20240308220055.2172956-13-dave.jiang@intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com>
97 lines
3.1 KiB
C
97 lines
3.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef __CXL_CORE_H__
#define __CXL_CORE_H__

/*
 * Private interfaces shared between the cxl core objects; not for use
 * by the endpoint drivers.
 */

/* device_type objects shared across the cxl core */
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
extern const struct device_type cxl_pmu_type;

/* base sysfs attributes common to cxl bus devices */
extern struct attribute_group cxl_base_attribute_group;
/*
 * Region support: when CONFIG_CXL_REGION is enabled the real region sysfs
 * attributes, device types, and lifecycle entry points are provided.
 * Otherwise the entry points degrade to nop stubs and the attribute/type
 * lookup macros evaluate to NULL / nothing so callers need no #ifdefs.
 */
struct cxl_endpoint_decoder;
struct cxl_port;

#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
extern struct device_attribute dev_attr_delete_region;
extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;
/* Detach @cxled from its region as part of decoder teardown */
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
#define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type)
int cxl_region_init(void);
void cxl_region_exit(void);
/* Collect poison records for all endpoint decoders hosted by @port */
int cxl_get_poison_by_endpoint(struct cxl_port *port);
#else
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	return 0;
}
static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
}
static inline int cxl_region_init(void)
{
	return 0;
}
static inline void cxl_region_exit(void)
{
}
#define CXL_REGION_ATTR(x) NULL
#define CXL_REGION_TYPE(x) NULL
#define SET_CXL_REGION_ATTR(x)
#define CXL_PMEM_REGION_TYPE(x) NULL
#define CXL_DAX_REGION_TYPE(x) NULL
#endif
struct cxl_send_command;
|
|
struct cxl_mem_query_commands;
|
|
int cxl_query_cmd(struct cxl_memdev *cxlmd,
|
|
struct cxl_mem_query_commands __user *q);
|
|
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
|
|
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
|
|
resource_size_t length);
|
|
|
|
struct dentry *cxl_debugfs_create_dir(const char *dir);
|
|
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
|
|
enum cxl_decoder_mode mode);
|
|
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
|
|
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
|
|
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
|
|
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
|
|
|
|
/*
 * enum cxl_rcrb - selects which half of a Root Complex Register Block
 * (RCRB) pair to operate on
 */
enum cxl_rcrb {
	CXL_RCRB_DOWNSTREAM,
	CXL_RCRB_UPSTREAM,
};
struct cxl_rcrb_info;
|
|
resource_size_t __rcrb_to_component(struct device *dev,
|
|
struct cxl_rcrb_info *ri,
|
|
enum cxl_rcrb which);
|
|
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
|
|
|
|
/* Serialization for DPA allocation and region assembly, respectively */
extern struct rw_semaphore cxl_dpa_rwsem;
extern struct rw_semaphore cxl_region_rwsem;

/* Module init/exit hooks for the core sub-components */
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);
/*
 * enum cxl_poison_trace_type - distinguishes which operation produced a
 * poison trace event (media-error list read, inject, or clear)
 */
enum cxl_poison_trace_type {
	CXL_POISON_TRACE_LIST,
	CXL_POISON_TRACE_INJECT,
	CXL_POISON_TRACE_CLEAR,
};
long cxl_pci_get_latency(struct pci_dev *pdev);
|
|
|
|
int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
|
|
enum access_coordinate_class access);
|
|
bool cxl_need_node_perf_attrs_update(int nid);
|
|
|
|
#endif /* __CXL_CORE_H__ */
|