linux-stable/arch/s390/hypfs/hypfs_diag0c.c
Heiko Carstens c78d0c7484 s390: rename dma section to amode31
The dma section name is confusing, since the code which resides within
that section has nothing to do with direct memory access.  Instead, the
limitation is that the code has to run in 31-bit addressing mode, and
therefore has to reside below 2GB.  The name was originally chosen
because ZONE_DMA covers the same region.

To reduce confusion, rename the section to amode31, which hopefully
better describes what it is about.

Note: this will also change vmcoreinfo strings
- SDMA=... gets renamed to SAMODE31=...
- EDMA=... gets renamed to EAMODE31=...
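
For illustration only, a minimal userspace sketch of how a dump tool might
pick up the renamed keys, assuming the usual NAME=value text lines of
vmcoreinfo with hexadecimal values; the helper and buffer contents below are
hypothetical and not part of this patch, and tools reading dumps from older
kernels would still need to fall back to SDMA=/EDMA=:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: find "KEY=<hex>" in a vmcoreinfo text buffer. */
static int vmcoreinfo_get_hex(const char *buf, const char *key,
			      unsigned long long *val)
{
	const char *p = buf;
	size_t klen = strlen(key);

	while (p && *p) {
		if (!strncmp(p, key, klen) && p[klen] == '=') {
			*val = strtoull(p + klen + 1, NULL, 16);
			return 0;
		}
		p = strchr(p, '\n');
		if (p)
			p++;
	}
	return -1;
}

int main(void)
{
	/* Buffer contents are made up for the example. */
	const char *vmcoreinfo = "SAMODE31=1000\nEAMODE31=8000\n";
	unsigned long long start, end;

	if (!vmcoreinfo_get_hex(vmcoreinfo, "SAMODE31", &start) &&
	    !vmcoreinfo_get_hex(vmcoreinfo, "EAMODE31", &end))
		printf("amode31 section: %#llx - %#llx\n", start, end);
	return 0;
}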

Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Alexander Egorenkov <egorenar@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2021-08-05 14:10:53 +02:00


// SPDX-License-Identifier: GPL-2.0
/*
 * Hypervisor filesystem for Linux on s390
 *
 * Diag 0C implementation
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/diag.h>
#include <asm/hypfs.h>
#include "hypfs.h"

#define DBFS_D0C_HDR_VERSION 0
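
/*
 * Data layout, as inferred from dbfs_diag0c_create() below (the structure
 * definitions themselves live in asm/hypfs.h): the "diag_0c" blob is one
 * struct hypfs_diag0c_hdr followed by hdr.count struct hypfs_diag0c_entry
 * records, one per CPU that was online when the data was collected.
 */
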
/*
 * Get hypfs_diag0c_entry from CPU vector and store diag0c data
 */
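/*
 * Called via on_each_cpu() below: runs on every online CPU in a context
 * where preemption is not possible, so smp_processor_id() is stable and
 * each CPU stores its diag 0c output into its own slot of the CPU vector
 * passed in "data".
 */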
static void diag0c_fn(void *data)
{
	diag_stat_inc(DIAG_STAT_X00C);
	diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
}

/*
 * Allocate buffer and store diag 0c data
 */
static void *diag0c_store(unsigned int *count)
{
	struct hypfs_diag0c_data *diag0c_data;
	unsigned int cpu_count, cpu, i;
	void **cpu_vec;

	cpus_read_lock();
	cpu_count = num_online_cpus();
	cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
				GFP_KERNEL);
	if (!cpu_vec)
		goto fail_unlock_cpus;
	/* Note: Diag 0c needs 8 byte alignment and real storage */
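	/*
	 * GFP_DMA allocates from ZONE_DMA, i.e. below 2GB on s390 (the same
	 * region the commit message above refers to), which keeps the buffer
	 * addressable with a 31-bit real address for the diag 0c call.
	 */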
	diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
			      GFP_KERNEL | GFP_DMA);
	if (!diag0c_data)
		goto fail_kfree_cpu_vec;
	i = 0;
	/* Fill CPU vector for each online CPU */
	for_each_online_cpu(cpu) {
		diag0c_data->entry[i].cpu = cpu;
		cpu_vec[cpu] = &diag0c_data->entry[i++];
	}
	/* Collect data on all CPUs */
	on_each_cpu(diag0c_fn, cpu_vec, 1);
	*count = cpu_count;
	kfree(cpu_vec);
	cpus_read_unlock();
	return diag0c_data;

fail_kfree_cpu_vec:
	kfree(cpu_vec);
fail_unlock_cpus:
	cpus_read_unlock();
	return ERR_PTR(-ENOMEM);
}

/*
 * Hypfs DBFS callback: Free diag 0c data
 */
static void dbfs_diag0c_free(const void *data)
{
	kfree(data);
}

/*
 * Hypfs DBFS callback: Create diag 0c data
 */
static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
{
	struct hypfs_diag0c_data *diag0c_data;
	unsigned int count;

	diag0c_data = diag0c_store(&count);
	if (IS_ERR(diag0c_data))
		return PTR_ERR(diag0c_data);
	memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
	store_tod_clock_ext((union tod_clock *)diag0c_data->hdr.tod_ext);
	diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
	diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
	diag0c_data->hdr.count = count;
	*data = diag0c_data;
	*data_free_ptr = diag0c_data;
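	/*
	 * Both pointers refer to the same kzalloc'ed buffer, so releasing it
	 * through dbfs_diag0c_free() frees header and entries in one go.
	 */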
	*size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
	return 0;
}

/*
 * Hypfs DBFS file structure
 */
static struct hypfs_dbfs_file dbfs_file_0c = {
	.name		= "diag_0c",
	.data_create	= dbfs_diag0c_create,
	.data_free	= dbfs_diag0c_free,
};

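/*
 * The blob registered via hypfs_dbfs_create_file() below is exposed through
 * debugfs. Assuming the usual mount point and hypfs dbfs directory name, it
 * can be dumped with e.g.:
 *
 *   dd if=/sys/kernel/debug/s390_hypfs/diag_0c of=diag_0c.bin
 *
 * and decoded with the structures from asm/hypfs.h; the exact path is an
 * assumption, not something this file guarantees.
 */
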
/*
 * Initialize diag 0c interface for z/VM
 */
int __init hypfs_diag0c_init(void)
{
	if (!MACHINE_IS_VM)
		return 0;
	hypfs_dbfs_create_file(&dbfs_file_0c);
	return 0;
}

/*
 * Shutdown diag 0c interface for z/VM
 */
void hypfs_diag0c_exit(void)
{
	if (!MACHINE_IS_VM)
		return;
	hypfs_dbfs_remove_file(&dbfs_file_0c);
}