// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
					 nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}
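
/*
 * Usage sketch (illustrative only, with a hypothetical caller): the caller
 * owns the descriptor until it is queued on the device list, so error paths
 * must pair the allocation with free_msi_entry():
 *
 *	struct msi_desc *desc = alloc_msi_entry(dev, 1, NULL);
 *
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->msi_index = 0;
 *	if (setup_failed)	// hypothetical condition
 *		free_msi_entry(desc);
 */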

/**
 * msi_add_msi_desc - Allocate and initialize an MSI descriptor
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = alloc_msi_entry(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy the MSI index and type specific data to the new descriptor. */
	desc->msi_index = init_desc->msi_index;
	desc->pci = init_desc->pci;

	list_add_tail(&desc->list, &dev->msi.data->list);
	return 0;
}

/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @index:	Index for the first MSI descriptor
 * @ndesc:	Number of descriptors to allocate
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	struct msi_desc *desc, *tmp;
	LIST_HEAD(list);
	unsigned int i;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (i = 0; i < ndesc; i++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc)
			goto fail;
		desc->msi_index = index + i;
		list_add_tail(&desc->list, &list);
	}
	list_splice_tail(&list, &dev->msi.data->list);
	return 0;

fail:
	list_for_each_entry_safe(desc, tmp, &list, list) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}
	return -ENOMEM;
}

/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev:	Device for which to free the descriptors
 * @filter:	Descriptor state filter
 * @first_index: Index to start freeing from
 * @last_index: Last index to be freed
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
			      unsigned int first_index, unsigned int last_index)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	msi_for_each_desc(desc, dev, filter) {
		/*
		 * Stupid for now to handle MSI device domain until the
		 * storage is switched over to an xarray.
		 */
		if (desc->msi_index < first_index || desc->msi_index > last_index)
			continue;
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	WARN_ON_ONCE(!list_empty(&md->list));
	dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	INIT_LIST_HEAD(&md->list);
	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}
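
/*
 * Usage sketch (illustrative only): bus code typically calls this once
 * during device setup, before any MSI descriptors are allocated. The devres
 * release hook above tears the storage down automatically, so there is no
 * matching "free" call:
 *
 *	ret = msi_setup_device_data(&pdev->dev);	// pdev is hypothetical
 *	if (ret)
 *		return ret;
 */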

/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Clear the next pointer which was cached by the iterator */
	dev->msi.data->__next = NULL;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
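
/*
 * Usage sketch (illustrative only, do_something() is hypothetical): every
 * walk of the descriptor list has to be wrapped in this lock pair:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		do_something(desc);
 *	msi_unlock_descs(dev);
 */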

static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}

static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	list_for_each_entry(desc, dev_to_msi_list(dev), list) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	return NULL;
}

/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	if (WARN_ON_ONCE(!dev->msi.data))
		return NULL;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_find_first_desc(dev, filter);
	dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
	return desc;
}
EXPORT_SYMBOL_GPL(msi_first_desc);

static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter,
					struct msi_desc *from)
{
	struct msi_desc *desc = from;

	list_for_each_entry_from(desc, dev_to_msi_list(dev), list) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	return NULL;
}

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *data = dev->msi.data;
	struct msi_desc *desc;

	if (WARN_ON_ONCE(!data))
		return NULL;

	lockdep_assert_held(&data->mutex);

	if (!data->__next)
		return NULL;

	desc = __msi_next_desc(dev, filter, data->__next);
	dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
	return desc;
}
EXPORT_SYMBOL_GPL(msi_next_desc);
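
/*
 * These two functions back the msi_for_each_desc() iterator. An open-coded
 * equivalent of that macro (shown for illustration only) looks like:
 *
 *	msi_lock_descs(dev);
 *	for (desc = msi_first_desc(dev, MSI_DESC_ALL); desc;
 *	     desc = msi_next_desc(dev, MSI_DESC_ALL))
 *		...;
 *	msi_unlock_descs(dev);
 */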

/**
 * msi_get_virq - Return Linux interrupt number of an MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	for_each_msi_entry(desc, dev) {
		/* PCI-MSI has only one descriptor for multiple interrupts. */
		if (pcimsi) {
			if (desc->irq && index < desc->nvec_used)
				return desc->irq + index;
			break;
		}

		/*
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (desc->msi_index == index)
			return desc->irq;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
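
/*
 * Usage sketch (illustrative only, handler/name/priv are hypothetical): a
 * driver maps an MSI index to a Linux interrupt number and requests it as
 * any other interrupt:
 *
 *	unsigned int virq = msi_get_virq(dev, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	ret = request_irq(virq, my_handler, 0, "my-dev", priv);
 */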

#ifdef CONFIG_SYSFS
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

/**
 * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices
 * @dev:	The device (PCI, platform etc.) which will get sysfs entries
 */
static const struct attribute_group **msi_populate_sysfs(struct device *dev)
{
	const struct attribute_group **msi_irq_groups;
	struct attribute **msi_attrs, *msi_attr;
	struct device_attribute *msi_dev_attr;
	struct attribute_group *msi_irq_group;
	struct msi_desc *entry;
	int ret = -ENOMEM;
	int num_msi = 0;
	int count = 0;
	int i;

	/* Determine how many msi entries we have */
	for_each_msi_entry(entry, dev)
		num_msi += entry->nvec_used;
	if (!num_msi)
		return NULL;

	/* Dynamically create the MSI attributes for the device */
	msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
	if (!msi_attrs)
		return ERR_PTR(-ENOMEM);

	for_each_msi_entry(entry, dev) {
		for (i = 0; i < entry->nvec_used; i++) {
			msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
			if (!msi_dev_attr)
				goto error_attrs;
			msi_attrs[count] = &msi_dev_attr->attr;

			sysfs_attr_init(&msi_dev_attr->attr);
			msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
							    entry->irq + i);
			if (!msi_dev_attr->attr.name)
				goto error_attrs;
			msi_dev_attr->attr.mode = 0444;
			msi_dev_attr->show = msi_mode_show;
			++count;
		}
	}

	msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
	if (!msi_irq_group)
		goto error_attrs;
	msi_irq_group->name = "msi_irqs";
	msi_irq_group->attrs = msi_attrs;

	msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
	if (!msi_irq_groups)
		goto error_irq_group;
	msi_irq_groups[0] = msi_irq_group;

	ret = sysfs_create_groups(&dev->kobj, msi_irq_groups);
	if (ret)
		goto error_irq_groups;

	return msi_irq_groups;

error_irq_groups:
	kfree(msi_irq_groups);
error_irq_group:
	kfree(msi_irq_group);
error_attrs:
	count = 0;
	msi_attr = msi_attrs[count];
	while (msi_attr) {
		msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
		kfree(msi_attr->name);
		kfree(msi_dev_attr);
		++count;
		msi_attr = msi_attrs[count];
	}
	kfree(msi_attrs);
	return ERR_PTR(ret);
}

/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc.) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
	const struct attribute_group **group = msi_populate_sysfs(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);
	dev->msi.data->attrs = group;
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc.) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	const struct attribute_group **msi_irq_groups = dev->msi.data->attrs;
	struct device_attribute *dev_attr;
	struct attribute **msi_attrs;
	int count = 0;

	dev->msi.data->attrs = NULL;
	if (!msi_irq_groups)
		return;

	sysfs_remove_groups(&dev->kobj, msi_irq_groups);
	msi_attrs = msi_irq_groups[0]->attrs;
	while (msi_attrs[count]) {
		dev_attr = container_of(msi_attrs[count], struct device_attribute, attr);
		kfree(dev_attr->attr.name);
		kfree(dev_attr);
		++count;
	}
	kfree(msi_attrs);
	kfree(msi_irq_groups[0]);
	kfree(msi_irq_groups);
}
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
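
/*
 * A provider which really composes two messages (sketch for illustration,
 * names are hypothetical) has to advertise the capability on both the
 * domain info and the irq chip so the check above stays silent:
 *
 *	static struct irq_chip my_msi_chip = {
 *		...
 *		.flags	= IRQCHIP_SUPPORTS_LEVEL_MSI,
 *	};
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_LEVEL_CAPABLE | ...,
 *		.chip	= &my_msi_chip,
 *	};
 */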

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			/* Unwind all entries which were initialized so far */
			if (ops->msi_free) {
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}
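
/*
 * Usage sketch (illustrative only, names are hypothetical): an irqchip
 * driver stacks an MSI domain on top of its parent domain and lets the
 * update helpers above fill in the default ops and chip callbacks:
 *
 *	static struct msi_domain_info my_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &my_info, parent_domain);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */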

int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret, virq;

	msi_lock_descs(dev);
	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc) {
			ret = -ENOMEM;
			goto fail;
		}

		desc->msi_index = virq;
		desc->irq = virq;
		list_add_tail(&desc->list, &dev->msi.data->list);

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
	msi_unlock_descs(dev);
	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = first_msi_entry(dev);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}

#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02
#define VIRQ_NOMASK_QUIRK	0x04

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}

int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random MSI message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}

static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
					   struct device *dev,
					   unsigned int num_descs)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_add_simple_msi_descs(dev, 0, num_descs);
}

/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		return ret;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		goto cleanup;

	if (!(info->flags & MSI_FLAG_DEV_SYSFS))
		return 0;

	ret = msi_device_populate_sysfs(dev);
	if (ret)
		goto cleanup;
	return 0;

cleanup:
	msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}
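
/*
 * Usage sketch (illustrative only): non-PCI callers pair this with
 * msi_domain_free_irqs() once the vectors are no longer needed:
 *
 *	ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *	...
 *	msi_domain_free_irqs(msi_domain, dev);
 */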

void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		desc->irq = 0;
	}
}

static void msi_domain_free_msi_descs(struct msi_domain_info *info,
				      struct device *dev)
{
	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_free_msi_descs(dev);
}

/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from an MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (info->flags & MSI_FLAG_DEV_SYSFS)
		msi_device_destroy_sysfs(dev);
	ops->domain_free_irqs(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}

/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */