linux-stable/drivers/virtio/virtio_mmio.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42 + 32,
 *				.end = 42 + 32,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *	[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *	<size>     := size (can use standard suffixes like K, M or G)
 *	<baseaddr> := physical base address
 *	<irq>      := interrupt number (as passed to request_irq())
 *	<id>       := (optional) platform device id
 *    eg.:
 *	virtio_mmio.device=0x100@0x100b0000:48 \
 *		virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */
#define pr_fmt(fmt) "virtio-mmio: " fmt
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>

/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE

#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

/* Configuration interface */
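
/*
 * The 64-bit feature word is exposed through a pair of banked 32-bit
 * registers: writing 0 or 1 to the *_FEATURES_SEL register selects which
 * half of the word the matching *_FEATURES register reads (device
 * features) or writes (driver features).
 */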
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}

static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

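/*
 * Legacy (version 1) devices present the config space in the guest's
 * native endianness and are accessed bytewise; version 2 devices define
 * it as little-endian and are accessed with naturally aligned reads and
 * writes, converted below with the le16/le32 helpers.
 */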
static void vm_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}

/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

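/*
 * When VIRTIO_F_NOTIFICATION_DATA has been negotiated, the driver writes
 * extra queue state (encoded by vring_notification_data()) into the
 * notification register instead of just the queue index.
 */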
static bool vm_notify_with_data(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	u32 data = vring_notification_data(vq);

	writel(data, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return true;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}

static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

static void vm_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
}

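/*
 * Set up a single virtqueue: select it, make sure it isn't already in use,
 * size the vring from QUEUE_NUM_MAX, then tell the device where the ring
 * lives - as a 32-bit page frame number for version 1 devices, or as
 * 64-bit descriptor/avail/used addresses plus QUEUE_READY for version 2.
 */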
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	bool (*notify)(struct virtqueue *vq);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vm_notify_with_data;
	else
		notify = vm_notify;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, true, ctx, notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

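/*
 * Create all requested virtqueues. virtio-mmio has a single, shared
 * interrupt line, so one handler is registered here and vm_interrupt()
 * demultiplexes config-change and vring events for every queue.
 */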
static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static bool vm_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 len, addr;

	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	/* Read the region size */
	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/* Check if region length is -1. If that's the case, the shared memory
	 * region does not exist and there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	/* Read the region base address */
	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;

	return true;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.generation	= vm_generation,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
	.get_shm_region = vm_get_shm_region,
	.synchronize_cbs = vm_synchronize_cbs,
};

#ifdef CONFIG_PM_SLEEP
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}

static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}

static const struct dev_pm_ops virtio_mmio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
};
#endif

static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	kfree(vm_dev);
}

/* Platform device */
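
/*
 * Probe sequence: map the registers, validate the "virt" magic and the
 * device version, read the device/vendor IDs, configure the DMA masks
 * (legacy devices need a coherent mask that fits a 32-bit page frame
 * number), and finally register the new virtio device.
 */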
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	unsigned long magic;
	int rc;

	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base)) {
		rc = PTR_ERR(vm_dev->base);
		goto free_vm_dev;
	}

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		rc = -ENODEV;
		goto free_vm_dev;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		rc = -ENXIO;
		goto free_vm_dev;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		rc = -ENODEV;
		goto free_vm_dev;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressible as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;

free_vm_dev:
	kfree(vm_dev);
	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}

/* Devices list parameter */
#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

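/*
 * Parse one "<size>@<base>:<irq>[:<id>]" module parameter and register a
 * matching "virtio-mmio" platform device with one MEM and one IRQ
 * resource, parented under virtio-mmio-cmdline so the devices can be
 * enumerated and torn down later.
 */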
static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed] || irq == 0)
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			put_device(&vm_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

/* Platform driver */
static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#ifdef CONFIG_PM_SLEEP
		.pm	= &virtio_mmio_pm_ops,
#endif
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");