2nd batch of s390 updates for 5.15 merge window

- Fix topology update on cpu hotplug, so notifiers see expected masks. This bug
   was uncovered with SCHED_CORE support.
 
 - Fix stack unwinding so that the correct number of entries is omitted, as
   expected by common code. This fixes the KCSAN selftests.
 
 - Add kmemleak annotation to stack_alloc to avoid false positive kmemleak
   warnings.
 
 - Avoid a layering violation in the common I/O code and don't unregister
   subchannels from child drivers.
 
 - Remove the xpram device driver, for which no real use case exists since the
   kernel is 64-bit only. All hypervisors have also removed the required
   support in the meantime, which makes the xpram device driver dead code.
 
 - Fix -ENODEV handling of clp_get_state in our PCI code.
 
 - Enable KFENCE in debug defconfig.
 
 - Clean up the s390-specific hugetlbfs Kconfig dependency.
 
 - Quite a lot of trivial fixes to get rid of "W=1" warnings, and other simple
   cleanups.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEECMNfWEw3SLnmiLkZIg7DeRspbsIFAmE56jEACgkQIg7DeRsp
 bsI1sQ/+L91zvpjlWGEPjZhQmFJgDufuObLWJlhwOSPsOlezzJTujNscoisTe6Wm
 hfS1I/GzGsgcY3695xgBLgkPS37nrDdDLAgM4CnajOOalEZjbHgH5gcPiCPHfPAD
 QkvVFv2PjCQnaPx81kEIeK6tMFkvi6IRhfwhtGTf1fwoKDyw4IQT1couBsiuAy3n
 28/7NqMidS4gbv5X/BLK1Ez4as9d3PoecNre1debRPOZcdxIjCVDy7OW5MotI3ol
 ENsOHtNJe/orIDCc+QbsEP2xZJZdbZ0D0Zr/RQ4KEue42wKtGLzp/ZuG+UfTPyyx
 vlEDgMRgPHAGnceEImcMwK0XQwOn05sm13jOkbmpIwhmiE46rksAPf3cGL4DjlBP
 3rznDXoLYELX2OAHz2G4jfbrqFWDxbh5rp1NMr8tELvJV5xbdsMC11QFQY28swod
 /sUE39fX+zynwHSSttq0PXtKX4gr/d5ZMDdlhjl7lxlOgwEwDodBL3/xL81+C0qx
 jkQWDsJ6OpZ7iJpGvxaCUhFjlgihdi2InZ942inRGo/A/EaM6/7diExLiyqfaab5
 WEQ2BOlITUey85Fiu2WxeeweRChUwu+XNQt+Nx4hDF454K51htU/GJCUBW5Z5qtN
 Dm+/DolXkPY+joR7xBLHNzivob3ShcsoFiZjoBpTc/Hd18dhSQg=
 =fpJz
 -----END PGP SIGNATURE-----

Merge tag 's390-5.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:
 "Except for the xpram device driver removal it is all about fixes and
  cleanups.

   - Fix topology update on cpu hotplug, so notifiers see expected
     masks. This bug was uncovered with SCHED_CORE support.

   - Fix stack unwinding so that the correct number of entries is
     omitted, as expected by common code. This fixes the KCSAN selftests.

   - Add kmemleak annotation to stack_alloc to avoid false positive
     kmemleak warnings.

   - Avoid a layering violation in the common I/O code and don't
     unregister subchannels from child drivers.

   - Remove the xpram device driver, for which no real use case exists
     since the kernel is 64-bit only. All hypervisors have also removed
     the required support in the meantime, which makes the xpram device
     driver dead code.

   - Fix -ENODEV handling of clp_get_state in our PCI code.

   - Enable KFENCE in debug defconfig.

   - Clean up the s390-specific hugetlbfs Kconfig dependency.

   - Quite a lot of trivial fixes to get rid of "W=1" warnings, and other
     simple cleanups"

* tag 's390-5.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  hugetlbfs: s390 is always 64bit
  s390/ftrace: remove incorrect __va usage
  s390/zcrypt: remove incorrect kernel doc indicators
  scsi: zfcp: fix kernel doc comments
  s390/sclp: add __nonstring annotation
  s390/hmcdrv_ftp: fix kernel doc comment
  s390: remove xpram device driver
  s390/pci: read clp_list_pci_req only once
  s390/pci: fix clp_get_state() handling of -ENODEV
  s390/cio: fix kernel doc comment
  s390/ctrlchar: fix kernel doc comment
  s390/con3270: use proper type for tasklet function
  s390/cpum_cf: move array from header to C file
  s390/mm: fix kernel doc comments
  s390/topology: fix topology information when calling cpu hotplug notifiers
  s390/unwind: use current_frame_address() to unwind current task
  s390/configs: enable CONFIG_KFENCE in debug_defconfig
  s390/entry: make oklabel within CHKSTG macro local
  s390: add kmemleak annotation in stack_alloc()
  s390/cio: dont unregister subchannel from child-drivers
Commit f154c80667 by Linus Torvalds, 2021-09-09 12:55:12 -07:00
38 changed files with 139 additions and 562 deletions


@ -110,6 +110,7 @@ config S390
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF


@ -804,6 +804,7 @@ CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_KFENCE=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y


@ -397,7 +397,6 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
# CONFIG_BLK_DEV_XPRAM is not set
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m


@ -35,7 +35,6 @@ CONFIG_NET=y
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y


@ -24,13 +24,6 @@ enum cpumf_ctr_set {
#define CPUMF_LCCTL_ENABLE_SHIFT 16
#define CPUMF_LCCTL_ACTCTL_SHIFT 0
static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
[CPUMF_CTR_SET_BASIC] = 0x02,
[CPUMF_CTR_SET_USER] = 0x04,
[CPUMF_CTR_SET_CRYPTO] = 0x08,
[CPUMF_CTR_SET_EXT] = 0x01,
[CPUMF_CTR_SET_MT_DIAG] = 0x20,
};
static inline void ctr_set_enable(u64 *state, u64 ctrsets)
{


@ -18,6 +18,7 @@ extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
extern unsigned int smp_cpu_mtid;
extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
extern cpumask_t cpu_setup_mask;
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);


@ -34,16 +34,6 @@ static inline bool on_stack(struct stack_info *info,
return addr >= info->begin && addr + len <= info->end;
}
static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
struct pt_regs *regs)
{
if (regs)
return (unsigned long) kernel_stack_pointer(regs);
if (task == current)
return current_stack_pointer();
return (unsigned long) task->thread.ksp;
}
/*
* Stack layout of a C stack frame.
*/
@ -74,6 +64,16 @@ struct stack_frame {
((unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain))
static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
struct pt_regs *regs)
{
if (regs)
return (unsigned long)kernel_stack_pointer(regs);
if (task == current)
return current_frame_address();
return (unsigned long)task->thread.ksp;
}
/*
* To keep this simple mark register 2-6 as being changed (volatile)
* by the called function, even though register 6 is saved/nonvolatile.


@ -55,10 +55,10 @@ static inline bool unwind_error(struct unwind_state *state)
return state->error;
}
static inline void unwind_start(struct unwind_state *state,
struct task_struct *task,
struct pt_regs *regs,
unsigned long first_frame)
static __always_inline void unwind_start(struct unwind_state *state,
struct task_struct *task,
struct pt_regs *regs,
unsigned long first_frame)
{
task = task ?: current;
first_frame = first_frame ?: get_stack_pointer(task, regs);


@ -140,10 +140,10 @@ _LPP_OFFSET = __LC_LPP
TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
jnz \errlabel
TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
jz oklabel\@
jz .Loklabel\@
TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
jnz \errlabel
oklabel\@:
.Loklabel\@:
.endm
#if IS_ENABLED(CONFIG_KVM)


@ -341,13 +341,13 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
brcl_disable(__va(ftrace_graph_caller));
brcl_disable(ftrace_graph_caller);
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
brcl_enable(__va(ftrace_graph_caller));
brcl_enable(ftrace_graph_caller);
return 0;
}


@ -158,6 +158,14 @@ static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
return need;
}
static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
[CPUMF_CTR_SET_BASIC] = 0x02,
[CPUMF_CTR_SET_USER] = 0x04,
[CPUMF_CTR_SET_CRYPTO] = 0x08,
[CPUMF_CTR_SET_EXT] = 0x01,
[CPUMF_CTR_SET_MT_DIAG] = 0x20,
};
/* Read out all counter sets and save them in the provided data buffer.
* The last 64 byte host an artificial trailer entry.
*/


@ -50,6 +50,7 @@
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
@ -356,9 +357,12 @@ void *restart_stack;
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
THREADINFO_GFP, NUMA_NO_NODE,
__builtin_return_address(0));
void *ret;
ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
NUMA_NO_NODE, __builtin_return_address(0));
kmemleak_not_leak(ret);
return (unsigned long)ret;
#else
return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif


@ -95,6 +95,7 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif
static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;
static int __init early_nosmt(char *s)
{
@ -902,13 +903,14 @@ static void smp_start_secondary(void *cpuvoid)
vtime_init();
vdso_getcpu_init();
pfault_init();
cpumask_set_cpu(cpu, &cpu_setup_mask);
update_cpu_masks();
notify_cpu_starting(cpu);
if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
set_cpu_online(cpu, true);
update_cpu_masks();
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@ -950,10 +952,13 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
unsigned long cregs[16];
int cpu;
/* Handle possible pending IPIs */
smp_handle_ext_call();
set_cpu_online(smp_processor_id(), false);
cpu = smp_processor_id();
set_cpu_online(cpu, false);
cpumask_clear_cpu(cpu, &cpu_setup_mask);
update_cpu_masks();
/* Disable pseudo page faults on this cpu. */
pfault_fini();


@ -67,7 +67,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
static cpumask_t mask;
cpumask_clear(&mask);
if (!cpu_online(cpu))
if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out;
cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
@ -88,7 +88,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
case TOPOLOGY_MODE_SINGLE:
break;
}
cpumask_and(&mask, &mask, cpu_online_mask);
cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
cpumask_copy(dst, &mask);
}
@ -99,16 +99,16 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
int i;
cpumask_clear(&mask);
if (!cpu_online(cpu))
if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out;
cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_present(cpu + i))
for (i = 0; i <= smp_cpu_mtid; i++) {
if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
cpumask_set_cpu(cpu + i, &mask);
cpumask_and(&mask, &mask, cpu_online_mask);
}
out:
cpumask_copy(dst, &mask);
}
@ -569,6 +569,7 @@ void __init topology_init_early(void)
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
out:
cpumask_set_cpu(0, &cpu_setup_mask);
__arch_update_cpu_topology();
__arch_update_dedicated_flag(NULL);
}


@ -27,7 +27,6 @@
/**
* gmap_alloc - allocate and initialize a guest address space
* @mm: pointer to the parent mm_struct
* @limit: maximum address of the gmap address space
*
* Returns a guest address space structure.
@ -504,7 +503,7 @@ EXPORT_SYMBOL_GPL(gmap_translate);
/**
* gmap_unlink - disconnect a page table from the gmap shadow tables
* @gmap: pointer to guest mapping meta data structure
* @mm: pointer to the parent mm_struct
* @table: pointer to the host page table
* @vmaddr: vm address associated with the host page table
*/
@ -527,7 +526,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
unsigned long gaddr);
/**
* gmap_link - set up shadow page tables to connect a host to a guest address
* __gmap_link - set up shadow page tables to connect a host to a guest address
* @gmap: pointer to guest mapping meta data structure
* @gaddr: guest address
* @vmaddr: vm address
@ -1971,7 +1970,7 @@ out_free:
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
* gmap_shadow_lookup_pgtable - find a shadow page table
* gmap_shadow_pgt_lookup - find a shadow page table
* @sg: pointer to the shadow guest address space structure
* @saddr: the address in the shadow aguest address space
* @pgt: parent gmap address of the page table to get shadowed
@ -2165,7 +2164,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
/*
* gmap_shadow_notify - handle notifications for shadow gmap
*
* Called with sg->parent->shadow_lock.
@ -2225,7 +2224,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
/**
* ptep_notify - call all invalidation callbacks for a specific pte.
* @mm: pointer to the process mm_struct
* @addr: virtual address in the process address space
* @vmaddr: virtual address in the process address space
* @pte: pointer to the page table entry
* @bits: bits from the pgste that caused the notify call
*


@ -834,7 +834,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(set_guest_storage_key);
/**
/*
* Conditionally set a guest storage key (handling csske).
* oldkey will be updated when either mr or mc is set and a pointer is given.
*
@ -867,7 +867,7 @@ int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
/**
/*
* Reset a guest reference bit (rrbe), returning the reference and changed bit.
*
* Returns < 0 in case of error, otherwise the cc to be reported to the guest.


@ -383,8 +383,8 @@ static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
rc = clp_list_pci_req(rrb, &resume_token, &nentries);
if (rc)
return rc;
fh_list = rrb->response.fh_list;
for (i = 0; i < nentries; i++) {
fh_list = rrb->response.fh_list;
if (fh_list[i].fid == fid) {
*entry = fh_list[i];
return 0;
@ -449,14 +449,17 @@ int clp_get_state(u32 fid, enum zpci_state *state)
struct clp_fh_list_entry entry;
int rc;
*state = ZPCI_FN_STATE_RESERVED;
rrb = clp_alloc_block(GFP_ATOMIC);
if (!rrb)
return -ENOMEM;
rc = clp_find_pci(rrb, fid, &entry);
if (!rc)
if (!rc) {
*state = entry.config_state;
} else if (rc == -ENODEV) {
*state = ZPCI_FN_STATE_RESERVED;
rc = 0;
}
clp_free_block(rrb);
return rc;


@ -2,17 +2,6 @@
comment "S/390 block device drivers"
depends on S390 && BLOCK
config BLK_DEV_XPRAM
def_tristate m
prompt "XPRAM disk support"
depends on S390 && BLOCK
help
Select this option if you want to use your expanded storage on S/390
or zSeries as a disk. This is useful as a _fast_ swap device if you
want to access more than 2G of memory when running in 31 bit mode.
This option is also available as a module which will be called
xpram. If unsure, say "N".
config DCSSBLK
def_tristate m
select FS_DAX_LIMITED


@ -16,7 +16,6 @@ obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o


@ -1,416 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Xpram.c -- the S/390 expanded memory RAM-disk
*
* significant parts of this code are based on
* the sbull device driver presented in
* A. Rubini: Linux Device Drivers
*
* Author of XPRAM specific coding: Reinhard Buendgen
* buendgen@de.ibm.com
* Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* External interfaces:
* Interfaces to linux kernel
* xpram_setup: read kernel parameters
* Device specific file operations
* xpram_iotcl
* xpram_open
*
* "ad-hoc" partitioning:
* the expanded memory can be partitioned among several devices
* (with different minors). The partitioning set up can be
* set by kernel or module parameters (int devs & int sizes[])
*
* Potential future improvements:
* generic hard disk support to replace ad-hoc partitioning
*/
#define KMSG_COMPONENT "xpram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h> /* isdigit, isxdigit */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
static unsigned int xpram_pages;
static int xpram_devs;
/*
* Parameter parsing functions.
*/
static int devs = XPRAM_DEVS;
static char *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
"the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
"the defaults are 0s \n" \
"All devices with size 0 equally partition the "
"remaining space on the expanded strorage not "
"claimed by explicit sizes\n");
MODULE_LICENSE("GPL");
/*
* Copy expanded memory page (4kB) into main memory
* Arguments
* page_addr: address of target page
* xpage_index: index of expandeded memory page
* Return value
* 0: if operation succeeds
* -EIO: if pgin failed
* -ENXIO: if xpram has vanished
*/
static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
{
int cc = 2; /* return unused cc 2 if pgin traps */
asm volatile(
" .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2)
return -ENXIO;
if (cc == 1)
return -EIO;
return 0;
}
/*
* Copy a 4kB page of main memory to an expanded memory page
* Arguments
* page_addr: address of source page
* xpage_index: index of expandeded memory page
* Return value
* 0: if operation succeeds
* -EIO: if pgout failed
* -ENXIO: if xpram has vanished
*/
static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
{
int cc = 2; /* return unused cc 2 if pgin traps */
asm volatile(
" .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2)
return -ENXIO;
if (cc == 1)
return -EIO;
return 0;
}
/*
* Check if xpram is available.
*/
static int __init xpram_present(void)
{
unsigned long mem_page;
int rc;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return -ENOMEM;
rc = xpram_page_in(mem_page, 0);
free_page(mem_page);
return rc ? -ENXIO : 0;
}
/*
* Return index of the last available xpram page.
*/
static unsigned long __init xpram_highest_page_index(void)
{
unsigned int page_index, add_bit;
unsigned long mem_page;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return 0;
page_index = 0;
add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
while (add_bit > 0) {
if (xpram_page_in(mem_page, page_index | add_bit) == 0)
page_index |= add_bit;
add_bit >>= 1;
}
free_page (mem_page);
return page_index;
}
/*
* Block device make request function.
*/
static blk_qc_t xpram_submit_bio(struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;
unsigned long page_addr;
unsigned long bytes;
blk_queue_split(&bio);
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
if ((bio->bi_iter.bi_size >> 12) > xdev->size)
/* Request size is no page-aligned. */
goto fail;
if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
kmap(bvec.bv_page) + bvec.bv_offset;
bytes = bvec.bv_len;
if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
/* More paranoia. */
goto fail;
while (bytes > 0) {
if (bio_data_dir(bio) == READ) {
if (xpram_page_in(page_addr, index) != 0)
goto fail;
} else {
if (xpram_page_out(page_addr, index) != 0)
goto fail;
}
page_addr += 4096;
bytes -= 4096;
index++;
}
}
bio_endio(bio);
return BLK_QC_T_NONE;
fail:
bio_io_error(bio);
return BLK_QC_T_NONE;
}
static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
unsigned long size;
/*
* get geometry: we have to fake one... trim the size to a
* multiple of 64 (32k): tell we have 16 sectors, 4 heads,
* whatever cylinders. Tell also that data starts at sector. 4.
*/
size = (xpram_pages * 8) & ~0x3f;
geo->cylinders = size >> 6;
geo->heads = 4;
geo->sectors = 16;
geo->start = 4;
return 0;
}
static const struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
.submit_bio = xpram_submit_bio,
.getgeo = xpram_getgeo,
};
/*
* Setup xpram_sizes array.
*/
static int __init xpram_setup_sizes(unsigned long pages)
{
unsigned long mem_needed;
unsigned long mem_auto;
unsigned long long size;
char *sizes_end;
int mem_auto_no;
int i;
/* Check number of devices. */
if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
pr_err("%d is not a valid number of XPRAM devices\n",devs);
return -EINVAL;
}
xpram_devs = devs;
/*
* Copy sizes array to xpram_sizes and align partition
* sizes to page boundary.
*/
mem_needed = 0;
mem_auto_no = 0;
for (i = 0; i < xpram_devs; i++) {
if (sizes[i]) {
size = simple_strtoull(sizes[i], &sizes_end, 0);
switch (*sizes_end) {
case 'g':
case 'G':
size <<= 20;
break;
case 'm':
case 'M':
size <<= 10;
}
xpram_sizes[i] = (size + 3) & -4UL;
}
if (xpram_sizes[i])
mem_needed += xpram_sizes[i];
else
mem_auto_no++;
}
pr_info(" number of devices (partitions): %d \n", xpram_devs);
for (i = 0; i < xpram_devs; i++) {
if (xpram_sizes[i])
pr_info(" size of partition %d: %u kB\n",
i, xpram_sizes[i]);
else
pr_info(" size of partition %d to be set "
"automatically\n",i);
}
pr_info(" memory needed (for sized partitions): %lu kB\n",
mem_needed);
pr_info(" partitions to be sized automatically: %d\n",
mem_auto_no);
if (mem_needed > pages * 4) {
pr_err("Not enough expanded memory available\n");
return -EINVAL;
}
/*
* partitioning:
* xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
* else: ; all partitions with zero xpram_sizes[i]
* partition equally the remaining space
*/
if (mem_auto_no) {
mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
pr_info(" automatically determined "
"partition size: %lu kB\n", mem_auto);
for (i = 0; i < xpram_devs; i++)
if (xpram_sizes[i] == 0)
xpram_sizes[i] = mem_auto;
}
return 0;
}
static int __init xpram_setup_blkdev(void)
{
unsigned long offset;
int i, rc = -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
xpram_disks[i] = blk_alloc_disk(NUMA_NO_NODE);
if (!xpram_disks[i])
goto out;
blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_disks[i]->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM,
xpram_disks[i]->queue);
blk_queue_logical_block_size(xpram_disks[i]->queue, 4096);
}
/*
* Register xpram major.
*/
rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
if (rc < 0)
goto out;
/*
* Setup device structures.
*/
offset = 0;
for (i = 0; i < xpram_devs; i++) {
struct gendisk *disk = xpram_disks[i];
xpram_devices[i].size = xpram_sizes[i] / 4;
xpram_devices[i].offset = offset;
offset += xpram_devices[i].size;
disk->major = XPRAM_MAJOR;
disk->first_minor = i;
disk->minors = 1;
disk->fops = &xpram_devops;
disk->private_data = &xpram_devices[i];
sprintf(disk->disk_name, "slram%d", i);
set_capacity(disk, xpram_sizes[i] << 1);
add_disk(disk);
}
return 0;
out:
while (i--)
blk_cleanup_disk(xpram_disks[i]);
return rc;
}
/*
* Finally, the init/exit functions.
*/
static void __exit xpram_exit(void)
{
int i;
for (i = 0; i < xpram_devs; i++) {
del_gendisk(xpram_disks[i]);
blk_cleanup_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
}
static int __init xpram_init(void)
{
int rc;
/* Find out size of expanded memory. */
if (xpram_present() != 0) {
pr_err("No expanded memory available\n");
return -ENODEV;
}
xpram_pages = xpram_highest_page_index() + 1;
pr_info(" %u pages expanded memory found (%lu KB).\n",
xpram_pages, (unsigned long) xpram_pages*4);
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
return xpram_setup_blkdev();
}
module_init(xpram_init);
module_exit(xpram_exit);


@ -292,13 +292,15 @@ con3270_update(struct timer_list *t)
* Read tasklet.
*/
static void
con3270_read_tasklet(struct raw3270_request *rrq)
con3270_read_tasklet(unsigned long data)
{
static char kreset_data = TW_KR;
struct raw3270_request *rrq;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
rrq = (struct raw3270_request *)data;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
@ -625,8 +627,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
timer_setup(&condev->timer, con3270_update, 0);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
tasklet_init(&condev->readlet, con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
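
The con3270 change above follows the standard pattern for the legacy tasklet API: tasklet_init() takes a handler of type void (*)(unsigned long), so the handler itself casts the opaque data argument back to the real type, instead of the function pointer being cast at registration time. A minimal sketch of that pattern (my_request, my_read_tasklet and my_setup are made-up names, not the actual con3270 code):

#include <linux/interrupt.h>
#include <linux/printk.h>

struct my_request;                      /* hypothetical payload type */

/* handler with the exact prototype tasklet_init() expects */
static void my_read_tasklet(unsigned long data)
{
        struct my_request *rq = (struct my_request *)data;

        pr_info("processing request %p from tasklet context\n", rq);
}

static struct tasklet_struct my_readlet;

static void my_setup(struct my_request *rq)
{
        /* pass the request as tasklet data; no function-pointer cast */
        tasklet_init(&my_readlet, my_read_tasklet, (unsigned long)rq);
}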


@ -34,12 +34,13 @@ void schedule_sysrq_work(struct sysrq_work *sw)
/**
* Check for special chars at start of input.
* ctrlchar_handle - check for special chars at start of input
*
* @param buf Console input buffer.
* @param len Length of valid data in buffer.
* @param tty The tty struct for this console.
* @return CTRLCHAR_NONE, if nothing matched,
* @buf: console input buffer
* @len: length of valid data in buffer
* @tty: the tty struct for this console
*
* Return: CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered
* otherwise char to be inserted logically or'ed
* with CTRLCHAR_CTRL


@ -26,7 +26,7 @@
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
* @cmd: FTP transfer function
* @transfer: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);


@ -28,7 +28,7 @@
#define SCLP_HEADER "sclp: "
struct sclp_trace_entry {
char id[4];
char id[4] __nonstring;
u32 a;
u64 b;
};
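
For context on the annotation above: __nonstring (provided by <linux/compiler_attributes.h>) tells the compiler that a char array holds a fixed-size byte tag rather than a NUL-terminated string, which avoids string-related warnings (such as -Wstringop-truncation) under W=1 builds when all bytes of the array are used for data. A minimal sketch of the idea (trace_tag and set_tag are made-up names, not the actual sclp trace code):

#include <linux/compiler_attributes.h>
#include <linux/string.h>

struct trace_tag {
        char id[4] __nonstring;         /* four-character tag, no trailing NUL */
        unsigned int value;
};

static void set_tag(struct trace_tag *t, const char *name)
{
        /* copy exactly four bytes; id is intentionally not a C string */
        memcpy(t->id, name, sizeof(t->id));
}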


@ -262,7 +262,10 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
css_schedule_eval_all_unreg(0);
/* There could be subchannels without proper devices connected.
* evaluate all the entries
*/
css_schedule_eval_all();
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)


@ -867,19 +867,6 @@ out_err:
wake_up(&ccw_device_init_wq);
}
static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
struct subchannel *sch;
/* Get subchannel reference for local processing. */
if (!get_device(cdev->dev.parent))
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Release subchannel reference for local processing. */
put_device(&sch->dev);
}
/*
* subchannel recognition done. Called from the state machine.
*/
@ -1857,10 +1844,10 @@ static void ccw_device_todo(struct work_struct *work)
css_schedule_eval(sch->schid);
fallthrough;
case CDEV_TODO_UNREG:
if (sch_is_pseudo_sch(sch))
ccw_device_unregister(cdev);
else
ccw_device_call_sch_unregister(cdev);
spin_lock_irq(sch->lock);
sch_set_cdev(sch, NULL);
spin_unlock_irq(sch->lock);
ccw_device_unregister(cdev);
break;
default:
break;


@ -91,7 +91,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
}
/**
* diag_get_dev_info - retrieve device information via diag 0x210
* diag210_get_dev_info - retrieve device information via diag 0x210
* @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.


@ -71,7 +71,7 @@ static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
/**
/*
* Process a rescan of the transport layer.
*
* Returns 1, if the rescan has been processed, otherwise 0.
@ -462,7 +462,7 @@ static void zcdn_destroy_all(void)
#endif
/**
/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
* This function is not supported beyond zcrypt 1.3.1.
@ -473,7 +473,7 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
return -EPERM;
}
/**
/*
* zcrypt_write(): Not allowed.
*
* Write is is not allowed
@ -484,7 +484,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
return -EPERM;
}
/**
/*
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
@ -512,7 +512,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
return stream_open(inode, filp);
}
/**
/*
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
@ -2153,7 +2153,7 @@ static void zcdn_exit(void)
#endif
/**
/*
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
@ -2191,7 +2191,7 @@ out:
return rc;
}
/**
/*
* zcrypt_api_exit(): Module termination.
*
* The module termination code.


@ -65,7 +65,7 @@ static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
/**
/*
* Probe function for CEX2A card devices. It always accepts the AP device
* since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device.
@ -124,7 +124,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
return rc;
}
/**
/*
* This is called to remove the CEX2A card driver information
* if an AP card device is removed.
*/
@ -142,7 +142,7 @@ static struct ap_driver zcrypt_cex2a_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
/*
* Probe function for CEX2A queue devices. It always accepts the AP device
* since the bus_match already checked the queue type.
* @ap_dev: pointer to the AP device.
@ -183,7 +183,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
return rc;
}
/**
/*
* This is called to remove the CEX2A queue driver information
* if an AP queue device is removed.
*/


@ -171,7 +171,7 @@ static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
/**
/*
* Large random number detection function. Its sends a message to a CEX2C/CEX3C
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
@ -237,7 +237,7 @@ out_free:
return rc;
}
/**
/*
* Probe function for CEX2C/CEX3C card devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@ -303,7 +303,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
return rc;
}
/**
/*
* This is called to remove the CEX2C/CEX3C card driver information
* if an AP card device is removed.
*/
@ -325,7 +325,7 @@ static struct ap_driver zcrypt_cex2c_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
/*
* Probe function for CEX2C/CEX3C queue devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@ -376,7 +376,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
return rc;
}
/**
/*
* This is called to remove the CEX2C/CEX3C queue driver information
* if an AP queue device is removed.
*/


@ -394,7 +394,7 @@ static const struct attribute_group ep11_queue_attr_grp = {
.attrs = ep11_queue_attrs,
};
/**
/*
* Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@ -562,7 +562,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
return rc;
}
/**
/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
* information if an AP card device is removed.
*/
@ -586,7 +586,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
/*
* Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@ -652,7 +652,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
return rc;
}
/**
/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
* information if an AP queue device is removed.
*/


@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
/**
/*
* The type 50 message family is associated with a CEXxA cards.
*
* The four members of the family are described below.
@ -136,7 +136,7 @@ struct type50_crb3_msg {
unsigned char message[512];
} __packed;
/**
/*
* The type 80 response family is associated with a CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
@ -188,7 +188,7 @@ unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
return 0;
}
/**
/*
* Convert a ICAMEX message to a type50 MEX message.
*
* @zq: crypto queue pointer
@ -255,7 +255,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
return 0;
}
/**
/*
* Convert a ICACRT message to a type50 CRT message.
*
* @zq: crypto queue pointer
@ -346,7 +346,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
return 0;
}
/**
/*
* Copy results from a type 80 reply message back to user space.
*
* @zq: crypto device pointer
@ -418,7 +418,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
}
}
/**
/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@ -457,7 +457,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -502,7 +502,7 @@ out:
return rc;
}
/**
/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -547,7 +547,7 @@ out:
return rc;
}
/**
/*
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {


@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
/**
/*
* CPRB
* Note that all shorts, ints and longs are little-endian.
* All pointer fields are 32-bits long, and mean nothing
@ -107,7 +107,7 @@ struct function_and_rules_block {
unsigned char only_rule[8];
} __packed;
/**
/*
* The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
@ -236,7 +236,7 @@ int speed_idx_ep11(int req_type)
}
/**
/*
* Convert a ICAMEX message to a type6 MEX message.
*
* @zq: crypto device pointer
@ -305,7 +305,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
return 0;
}
/**
/*
* Convert a ICACRT message to a type6 CRT message.
*
* @zq: crypto device pointer
@ -374,7 +374,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
return 0;
}
/**
/*
* Convert a XCRB message to a type6 CPRB message.
*
* @zq: crypto device pointer
@ -571,7 +571,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
return 0;
}
/**
/*
* Copy results from a type 86 ICA reply message back to user space.
*
* @zq: crypto device pointer
@ -697,7 +697,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
return 0;
}
/**
/*
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@ -728,7 +728,7 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
return 0;
}
/**
/*
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@ -911,7 +911,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
}
}
/**
/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@ -966,7 +966,7 @@ out:
complete(&(resp_type->work));
}
/**
/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@ -1015,7 +1015,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -1063,7 +1063,7 @@ out_free:
return rc;
}
/**
/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -1112,7 +1112,7 @@ out_free:
return rc;
}
/**
/*
* Fetch function code from cprb.
* Extracting the fc requires to copy the cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@ -1140,7 +1140,7 @@ unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
}
/**
/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -1170,7 +1170,7 @@ out:
return rc;
}
/**
/*
* Fetch function code from ep11 cprb.
* Extracting the fc requires to copy the ep11 cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@ -1198,7 +1198,7 @@ unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
}
/**
/*
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -1228,7 +1228,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
} __packed * payload_hdr = NULL;
/**
/*
* The target domain field within the cprb body/payload block will be
* replaced by the usage domain for non-management commands only.
* Therefore we check the first bit of the 'flags' parameter for
@ -1299,7 +1299,7 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
return 0;
}
/**
/*
* The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
@ -1339,7 +1339,7 @@ out:
return rc;
}
/**
/*
* The crypto operations for a CEXxC card.
*/
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {


@ -766,7 +766,7 @@ static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
}
/**
* zfcp_adapter_debug_register - registers debug feature for an adapter
* zfcp_dbf_adapter_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* return: -ENOMEM on error, 0 otherwise
*/
@ -824,7 +824,7 @@ err_out:
}
/**
* zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
* zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)


@ -2275,7 +2275,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
}
/**
* zfcp_fsf_close_LUN - close LUN
* zfcp_fsf_close_lun - close LUN
* @erp_action: pointer to erp_action triggering the "close LUN"
* Returns: 0 on success, error otherwise
*/


@ -384,7 +384,7 @@ free_req_q:
}
/**
* zfcp_close_qdio - close qdio queues for an adapter
* zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to structure zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)


@ -111,9 +111,9 @@ static void zfcp_unit_release(struct device *dev)
}
/**
* zfcp_unit_enqueue - enqueue unit to unit list of a port.
* zfcp_unit_add - add unit to unit list of a port.
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be enqueued
* @fcp_lun: FCP LUN of unit to be added
* Returns: 0 success
*
* Sets up some unit internal structures and creates sysfs entry.


@ -234,8 +234,7 @@ config ARCH_SUPPORTS_HUGETLBFS
config HUGETLBFS
bool "HugeTLB file system support"
depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
ARCH_SUPPORTS_HUGETLBFS || BROKEN
depends on X86 || IA64 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read