s390 updates for 6.5 merge window part 2

- Fix virtual vs physical address confusion in vmem_add_range()
   and vmem_remove_range() functions.
 
 - Include <linux/io.h> instead of <asm/io.h> and <asm-generic/io.h>
   throughout s390 code.
 
 - Make all PSW related defines also available for assembler files.
   Remove PSW_DEFAULT_KEY define from uapi for that.
 
 - When an undefined symbol is added, the build still succeeds, but
   userspace crashes trying to execute the VDSO, because the symbol
   is not resolved. Add an undefined symbols check to prevent that.
 
 - Use kvmalloc_array() instead of kzalloc() for allocation of 256k
   memory when executing s390 crypto adapter IOCTL.
 
 - Add -fPIE flag to prevent decompressor misaligned symbol build
   error with clang.
 
 - Use .balign instead of .align everywhere. This is a no-op for s390,
   but with this there is no mix of .align and .balign anymore.
 
 - Filter the kernel's -mno-pic-data-is-text-relative flag out of the
   VDSO cflags to prevent a VDSO build error.
 
 - Rework entering of DAT-on mode on CPU restart to use PSW_KERNEL_BITS
   mask directly.
 
 - Do not retry administrative requests to some s390 crypto cards,
   since the firmware treats such retries as replay attacks.
 
 - Remove most of the debug code, which is built in when kernel config
   option CONFIG_ZCRYPT_DEBUG is enabled.
 
 - Remove CONFIG_ZCRYPT_MULTIDEVNODES kernel config option and make
   the multiple device nodes support of the s390 zcrypt device driver
   unconditional.
 
 - With the conversion to generic entry, machine checks are accounted
   to the current context instead of irq time. As a result, the STCKF
   instruction at the beginning of the machine check handler and the
   mcck_clock lowcore member are no longer required, so remove them.
 
 - Fix various typos found with codespell.
 
 - Minor cleanups to CPU-measurement Counter and Sampling Facilities code.
 
 - Revert patch that removes VMEM_MAX_PHYS macro, since it causes
   a regression.
 -----BEGIN PGP SIGNATURE-----
 
 iI0EABYIADUWIQQrtrZiYVkVzKQcYivNdxKlNrRb8AUCZKakSBccYWdvcmRlZXZA
 bGludXguaWJtLmNvbQAKCRDNdxKlNrRb8HeIAQCg9RX3/olsZhCqRNLZ/O+6FXAF
 29ohi2JmVqxJBKkmwgEA/QXCjoTOp41pQJ1FD39HnI8DeYpJFRnYYE5D3acibAw=
 =2Ykk
 -----END PGP SIGNATURE-----

Merge tag 's390-6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Alexander Gordeev:

 - Fix virtual vs physical address confusion in vmem_add_range() and
   vmem_remove_range() functions

 - Include <linux/io.h> instead of <asm/io.h> and <asm-generic/io.h>
   throughout s390 code

 - Make all PSW related defines also available for assembler files.
   Remove PSW_DEFAULT_KEY define from uapi for that (a short sketch of
   the approach follows after this list)

 - When an undefined symbol is added, the build still succeeds, but
   userspace crashes trying to execute the VDSO, because the symbol is
   not resolved. Add an undefined symbols check to prevent that

 - Use kvmalloc_array() instead of kzalloc() for allocation of 256k
   memory when executing s390 crypto adapter IOCTL (a minimal sketch of
   the pattern follows after this list)

 - Add -fPIE flag to prevent decompressor misaligned symbol build error
   with clang

 - Use .balign instead of .align everywhere. This is a no-op for s390,
   but with this there is no mix of .align and .balign anymore

 - Filter the kernel's -mno-pic-data-is-text-relative flag out of the
   VDSO cflags to prevent a VDSO build error

 - Rework entering of DAT-on mode on CPU restart to use PSW_KERNEL_BITS
   mask directly

 - Do not retry administrative requests to some s390 crypto cards, since
   the firmware treats such retries as replay attacks

 - Remove most of the debug code, which is built in when kernel config
   option CONFIG_ZCRYPT_DEBUG is enabled

 - Remove CONFIG_ZCRYPT_MULTIDEVNODES kernel config option and make the
   multiple device nodes support of the s390 zcrypt device driver
   unconditional

 - With the conversion to generic entry, machine checks are accounted to
   the current context instead of irq time. As a result, the STCKF
   instruction at the beginning of the machine check handler and the
   mcck_clock lowcore member are no longer required, so remove them

 - Fix various typos found with codespell

 - Minor cleanups to CPU-measurement Counter and Sampling Facilities
   code

 - Revert patch that removes VMEM_MAX_PHYS macro, since it causes a
   regression
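
The PSW defines rework above is easier to follow with the _AC() macro
spelled out. The sketch below is a simplified illustration, not the
merged code: the real _AC() lives in include/uapi/linux/const.h, the
PSW_MASK_DAT value matches the reworked header, and the surrounding
comments are mine.

  /* One header for both C and assembler: _AC() drops the type suffix
   * when the file is included from assembler sources. */
  #ifdef __ASSEMBLY__
  #define _AC(X, Y)       X                /* asm: bare constant */
  #else
  #define __AC(X, Y)      (X##Y)
  #define _AC(X, Y)       __AC(X, Y)       /* C: constant with UL suffix */
  #endif

  #define PSW_MASK_DAT    _AC(0x0400000000000000, UL)

  /* C sources see:   (0x0400000000000000UL)
   * asm sources see:  0x0400000000000000 */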
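
The kvmalloc_array() conversion follows the common kernel pattern for
large allocations that do not need to be physically contiguous. Below is
a minimal, hypothetical sketch of that pattern; the struct and function
names are made up for illustration and are not the zcrypt code:

  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/types.h>
  #include <linux/uaccess.h>

  struct dev_status {                  /* stand-in for the real entry type */
          u32 hwtype;
          u32 qid;
  };

  static long status_ioctl_sketch(void __user *uptr, size_t nr_entries)
  {
          struct dev_status *tbl;
          long rc = 0;

          /* May transparently fall back to vmalloc() for large sizes. */
          tbl = kvmalloc_array(nr_entries, sizeof(*tbl),
                               GFP_KERNEL | __GFP_ZERO);
          if (!tbl)
                  return -ENOMEM;
          /* ... fill tbl[] from the hardware here ... */
          if (copy_to_user(uptr, tbl, nr_entries * sizeof(*tbl)))
                  rc = -EFAULT;
          kvfree(tbl);    /* correct for both kmalloc and vmalloc backing */
          return rc;
  }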

* tag 's390-6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (25 commits)
  Revert "s390/mm: get rid of VMEM_MAX_PHYS macro"
  s390/cpum_sf: remove check on CPU being online
  s390/cpum_sf: handle casts consistently
  s390/cpum_sf: remove unnecessary debug statement
  s390/cpum_sf: remove parameter in call to pr_err
  s390/cpum_sf: simplify function setup_pmu_cpu
  s390/cpum_cf: remove unneeded debug statements
  s390/entry: remove mcck clock
  s390: fix various typos
  s390/zcrypt: remove ZCRYPT_MULTIDEVNODES kernel config option
  s390/zcrypt: do not retry administrative requests
  s390/zcrypt: cleanup some debug code
  s390/entry: rework entering DAT-on mode on CPU restart
  s390/mm: fence off VM macros from asm and linker
  s390: include linux/io.h instead of asm/io.h
  s390/ptrace: make all psw related defines also available for asm
  s390/ptrace: remove PSW_DEFAULT_KEY from uapi
  s390/vdso: filter out mno-pic-data-is-text-relative cflag
  s390: consistently use .balign instead of .align
  s390/decompressor: fix misaligned symbol build error
  ...
Committed by Linus Torvalds on 2023-07-06 13:18:30 -07:00
commit a452483508
73 changed files with 235 additions and 428 deletions


@ -27,6 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbac
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += -fPIE
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))


@ -26,10 +26,10 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/appldata.h>
#include <asm/vtimer.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
#include "appldata.h"


@ -15,7 +15,7 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/io.h>
#include "appldata.h"


@ -67,7 +67,7 @@ ipl_start:
jz .Lagain1 # skip dataset header
larl %r13,.L_eof
clc 0(3,%r4),0(%r13) # if it is EOFx
jz .Lagain1 # skip dateset trailer
jz .Lagain1 # skip data set trailer
lgr %r5,%r2
la %r6,COMMAND_LINE-PARMAREA(%r12)
lgr %r7,%r2
@ -185,19 +185,19 @@ ipl_start:
larl %r13,.Lcrash
lpsw 0(%r13)
.align 8
.balign 8
.Lwaitpsw:
.quad 0x0202000180000000,.Lioint
.Lnewpswmask:
.quad 0x0000000180000000
.align 8
.balign 8
.Lorb: .long 0x00000000,0x0080ff00,.Lccws
.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.align 8
.balign 8
.Lcr6: .quad 0x00000000ff000000
.align 8
.balign 8
.Lcrash:.long 0x000a0000,0x00000000
.align 8
.balign 8
.Lccws: .rept 19
.long 0x02600050,0x00000000
.endr
@ -207,7 +207,7 @@ ipl_start:
.byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
.L_eof: .long 0xc5d6c600 /* C'EOF' */
.L_hdr: .long 0xc8c4d900 /* C'HDR' */
.align 8
.balign 8
.Lcpuid:.fill 8,1,0
#
@ -265,7 +265,7 @@ SYM_CODE_START_LOCAL(startup_normal)
brasl %r14,startup_kernel
SYM_CODE_END(startup_normal)
.align 8
.balign 8
6: .long 0x7fffffff,0xffffffff
.Lext_new_psw:
.quad 0x0002000180000000,0x1b0 # disabled wait


@ -82,12 +82,12 @@ SYM_CODE_START_LOCAL(startup_kdump)
#
# Startup of kdump (relocated new kernel)
#
.align 2
.balign 2
startup_kdump_relocated:
basr %r13,0
0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
SYM_CODE_END(startup_kdump)
.align 8
.balign 8
.Lrestart_psw:
.quad 0x0000000080000000,0x0000000000000000 + startup
#else
@ -95,7 +95,7 @@ SYM_CODE_START_LOCAL(startup_kdump)
larl %r13,startup_kdump_crash
lpswe 0(%r13)
SYM_CODE_END(startup_kdump)
.align 8
.balign 8
startup_kdump_crash:
.quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
#endif /* CONFIG_CRASH_DUMP */


@ -48,7 +48,7 @@
*
* Note that the constant definitions below are extended in order to compute
* intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
* The righmost doubleword can be 0 to prevent contribution to the result or
* The rightmost doubleword can be 0 to prevent contribution to the result or
* can be multiplied by 1 to perform an XOR without the need for a separate
* VECTOR EXCLUSIVE OR instruction.
*


@ -333,7 +333,7 @@ union ap_qact_ap_info {
};
/**
* ap_qact(): Query AP combatibility type.
* ap_qact(): Query AP compatibility type.
* @qid: The AP queue number
* @apinfo: On input the info about the AP queue. On output the
* alternate AP queue info provided by the qact function


@ -8,8 +8,8 @@
#ifndef _ASM_S390_APPLDATA_H
#define _ASM_S390_APPLDATA_H
#include <linux/io.h>
#include <asm/diag.h>
#include <asm/io.h>
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81


@ -25,7 +25,7 @@
#define __EX_TABLE(_section, _fault, _target, _type) \
stringify_in_c(.section _section,"a";) \
stringify_in_c(.align 4;) \
stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
@ -34,7 +34,7 @@
#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
stringify_in_c(.section _section,"a";) \
stringify_in_c(.align 4;) \
stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \


@ -2,7 +2,7 @@
#ifndef _ASM_S390_DMA_H
#define _ASM_S390_DMA_H
#include <asm/io.h>
#include <linux/io.h>
/*
* MAX_DMA_ADDRESS is ambiguous because on s390 its completely unrelated


@ -118,8 +118,8 @@ struct lowcore {
__u64 avg_steal_timer; /* 0x0300 */
__u64 last_update_timer; /* 0x0308 */
__u64 last_update_clock; /* 0x0310 */
__u64 int_clock; /* 0x0318*/
__u64 mcck_clock; /* 0x0320 */
__u64 int_clock; /* 0x0318 */
__u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */
__u64 clock_comparator; /* 0x0328 */
__u64 boot_clock[2]; /* 0x0330 */


@ -19,7 +19,7 @@
#define PAGE_SHIFT _PAGE_SHIFT
#define PAGE_SIZE _PAGE_SIZE
#define PAGE_MASK _PAGE_MASK
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_ACC _AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC 9
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
@ -179,8 +179,6 @@ int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
#endif /* !__ASSEMBLY__ */
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
@ -204,6 +202,8 @@ int arch_make_page_accessible(struct page *page);
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
#endif /* !__ASSEMBLY__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>


@ -23,7 +23,31 @@
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)
#ifndef __ASSEMBLY__
#define PSW32_MASK_PER _AC(0x40000000, UL)
#define PSW32_MASK_DAT _AC(0x04000000, UL)
#define PSW32_MASK_IO _AC(0x02000000, UL)
#define PSW32_MASK_EXT _AC(0x01000000, UL)
#define PSW32_MASK_KEY _AC(0x00F00000, UL)
#define PSW32_MASK_BASE _AC(0x00080000, UL) /* Always one */
#define PSW32_MASK_MCHECK _AC(0x00040000, UL)
#define PSW32_MASK_WAIT _AC(0x00020000, UL)
#define PSW32_MASK_PSTATE _AC(0x00010000, UL)
#define PSW32_MASK_ASC _AC(0x0000C000, UL)
#define PSW32_MASK_CC _AC(0x00003000, UL)
#define PSW32_MASK_PM _AC(0x00000f00, UL)
#define PSW32_MASK_RI _AC(0x00000080, UL)
#define PSW32_ADDR_AMODE _AC(0x80000000, UL)
#define PSW32_ADDR_INSN _AC(0x7FFFFFFF, UL)
#define PSW32_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 20)
#define PSW32_ASC_PRIMARY _AC(0x00000000, UL)
#define PSW32_ASC_ACCREG _AC(0x00004000, UL)
#define PSW32_ASC_SECONDARY _AC(0x00008000, UL)
#define PSW32_ASC_HOME _AC(0x0000C000, UL)
#define PSW_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 52)
#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_DAT)
@ -31,6 +55,8 @@
PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
#ifndef __ASSEMBLY__
struct psw_bits {
unsigned long : 1;
unsigned long per : 1; /* PER-Mask */
@ -71,30 +97,6 @@ enum {
&(*(struct psw_bits *)(&(__psw))); \
}))
#define PSW32_MASK_PER 0x40000000UL
#define PSW32_MASK_DAT 0x04000000UL
#define PSW32_MASK_IO 0x02000000UL
#define PSW32_MASK_EXT 0x01000000UL
#define PSW32_MASK_KEY 0x00F00000UL
#define PSW32_MASK_BASE 0x00080000UL /* Always one */
#define PSW32_MASK_MCHECK 0x00040000UL
#define PSW32_MASK_WAIT 0x00020000UL
#define PSW32_MASK_PSTATE 0x00010000UL
#define PSW32_MASK_ASC 0x0000C000UL
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
#define PSW32_MASK_RI 0x00000080UL
#define PSW32_ADDR_AMODE 0x80000000UL
#define PSW32_ADDR_INSN 0x7FFFFFFFUL
#define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20)
#define PSW32_ASC_PRIMARY 0x00000000UL
#define PSW32_ASC_ACCREG 0x00004000UL
#define PSW32_ASC_SECONDARY 0x00008000UL
#define PSW32_ASC_HOME 0x0000C000UL
typedef struct {
unsigned int mask;
unsigned int addr;


@ -31,7 +31,7 @@
struct cmbdata {
__u64 size;
__u64 elapsed_time;
/* basic and exended format: */
/* basic and extended format: */
__u64 ssch_rsch_count;
__u64 sample_count;
__u64 device_connect_time;


@ -24,7 +24,7 @@
/*
* struct dasd_information2_t
* represents any data about the device, which is visible to userspace.
* including foramt and featueres.
* including format and featueres.
*/
typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */


@ -353,7 +353,7 @@ struct pkey_kblob2pkey2 {
* Is able to find out which type of secure key is given (CCA AES secure
* key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private
* key) and tries to find all matching crypto cards based on the MKVP and maybe
* other criterias (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
* other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys
* with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of
* APQNs is further filtered by the key's mkvp which needs to match to either
* the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters
@ -370,7 +370,7 @@ struct pkey_kblob2pkey2 {
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
* of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
* but the number of apqn targets does not fit into the list, the apqn_targets
* field is updatedd with the number of reqired entries but there are no apqn
* field is updated with the number of required entries but there are no apqn
* values stored in the list and the ioctl returns with ENOSPC. If no matching
* APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
*/
@ -408,7 +408,7 @@ struct pkey_apqns4key {
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
* of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
* but the number of apqn targets does not fit into the list, the apqn_targets
* field is updatedd with the number of reqired entries but there are no apqn
* field is updated with the number of required entries but there are no apqn
* values stored in the list and the ioctl returns with ENOSPC. If no matching
* APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
*/


@ -166,6 +166,64 @@
#endif /* __s390x__ */
#ifndef __s390x__
#define PSW_MASK_PER _AC(0x40000000, UL)
#define PSW_MASK_DAT _AC(0x04000000, UL)
#define PSW_MASK_IO _AC(0x02000000, UL)
#define PSW_MASK_EXT _AC(0x01000000, UL)
#define PSW_MASK_KEY _AC(0x00F00000, UL)
#define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */
#define PSW_MASK_MCHECK _AC(0x00040000, UL)
#define PSW_MASK_WAIT _AC(0x00020000, UL)
#define PSW_MASK_PSTATE _AC(0x00010000, UL)
#define PSW_MASK_ASC _AC(0x0000C000, UL)
#define PSW_MASK_CC _AC(0x00003000, UL)
#define PSW_MASK_PM _AC(0x00000F00, UL)
#define PSW_MASK_RI _AC(0x00000000, UL)
#define PSW_MASK_EA _AC(0x00000000, UL)
#define PSW_MASK_BA _AC(0x00000000, UL)
#define PSW_MASK_USER _AC(0x0000FF00, UL)
#define PSW_ADDR_AMODE _AC(0x80000000, UL)
#define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL)
#define PSW_ASC_PRIMARY _AC(0x00000000, UL)
#define PSW_ASC_ACCREG _AC(0x00004000, UL)
#define PSW_ASC_SECONDARY _AC(0x00008000, UL)
#define PSW_ASC_HOME _AC(0x0000C000, UL)
#else /* __s390x__ */
#define PSW_MASK_PER _AC(0x4000000000000000, UL)
#define PSW_MASK_DAT _AC(0x0400000000000000, UL)
#define PSW_MASK_IO _AC(0x0200000000000000, UL)
#define PSW_MASK_EXT _AC(0x0100000000000000, UL)
#define PSW_MASK_BASE _AC(0x0000000000000000, UL)
#define PSW_MASK_KEY _AC(0x00F0000000000000, UL)
#define PSW_MASK_MCHECK _AC(0x0004000000000000, UL)
#define PSW_MASK_WAIT _AC(0x0002000000000000, UL)
#define PSW_MASK_PSTATE _AC(0x0001000000000000, UL)
#define PSW_MASK_ASC _AC(0x0000C00000000000, UL)
#define PSW_MASK_CC _AC(0x0000300000000000, UL)
#define PSW_MASK_PM _AC(0x00000F0000000000, UL)
#define PSW_MASK_RI _AC(0x0000008000000000, UL)
#define PSW_MASK_EA _AC(0x0000000100000000, UL)
#define PSW_MASK_BA _AC(0x0000000080000000, UL)
#define PSW_MASK_USER _AC(0x0000FF0180000000, UL)
#define PSW_ADDR_AMODE _AC(0x0000000000000000, UL)
#define PSW_ADDR_INSN _AC(0xFFFFFFFFFFFFFFFF, UL)
#define PSW_ASC_PRIMARY _AC(0x0000000000000000, UL)
#define PSW_ASC_ACCREG _AC(0x0000400000000000, UL)
#define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL)
#define PSW_ASC_HOME _AC(0x0000C00000000000, UL)
#endif /* __s390x__ */
#define NUM_GPRS 16
#define NUM_FPRS 16
#define NUM_CRS 16
@ -214,69 +272,6 @@ typedef struct {
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
#define PSW_MASK_DAT 0x04000000UL
#define PSW_MASK_IO 0x02000000UL
#define PSW_MASK_EXT 0x01000000UL
#define PSW_MASK_KEY 0x00F00000UL
#define PSW_MASK_BASE 0x00080000UL /* always one */
#define PSW_MASK_MCHECK 0x00040000UL
#define PSW_MASK_WAIT 0x00020000UL
#define PSW_MASK_PSTATE 0x00010000UL
#define PSW_MASK_ASC 0x0000C000UL
#define PSW_MASK_CC 0x00003000UL
#define PSW_MASK_PM 0x00000F00UL
#define PSW_MASK_RI 0x00000000UL
#define PSW_MASK_EA 0x00000000UL
#define PSW_MASK_BA 0x00000000UL
#define PSW_MASK_USER 0x0000FF00UL
#define PSW_ADDR_AMODE 0x80000000UL
#define PSW_ADDR_INSN 0x7FFFFFFFUL
#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
#define PSW_ASC_PRIMARY 0x00000000UL
#define PSW_ASC_ACCREG 0x00004000UL
#define PSW_ASC_SECONDARY 0x00008000UL
#define PSW_ASC_HOME 0x0000C000UL
#else /* __s390x__ */
#define PSW_MASK_PER 0x4000000000000000UL
#define PSW_MASK_DAT 0x0400000000000000UL
#define PSW_MASK_IO 0x0200000000000000UL
#define PSW_MASK_EXT 0x0100000000000000UL
#define PSW_MASK_BASE 0x0000000000000000UL
#define PSW_MASK_KEY 0x00F0000000000000UL
#define PSW_MASK_MCHECK 0x0004000000000000UL
#define PSW_MASK_WAIT 0x0002000000000000UL
#define PSW_MASK_PSTATE 0x0001000000000000UL
#define PSW_MASK_ASC 0x0000C00000000000UL
#define PSW_MASK_CC 0x0000300000000000UL
#define PSW_MASK_PM 0x00000F0000000000UL
#define PSW_MASK_RI 0x0000008000000000UL
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
#define PSW_MASK_USER 0x0000FF0180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
#define PSW_ASC_PRIMARY 0x0000000000000000UL
#define PSW_ASC_ACCREG 0x0000400000000000UL
#define PSW_ASC_SECONDARY 0x0000800000000000UL
#define PSW_ASC_HOME 0x0000C00000000000UL
#endif /* __s390x__ */
/*
* The s390_regs structure is used to define the elf_gregset_t.
*/


@ -122,7 +122,6 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);


@ -16,10 +16,10 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];


@ -24,8 +24,8 @@
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/io.h>
#include <asm/dis.h>
#include <asm/io.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
@ -516,7 +516,7 @@ void show_code(struct pt_regs *regs)
if (copy_from_regs(regs, code + end, (void *)addr, 2))
break;
}
/* Code snapshot useable ? */
/* Code snapshot usable ? */
if ((regs->psw.addr & 1) || start >= end) {
printk("%s Code: Bad PSW.\n", mode);
return;


@ -136,7 +136,7 @@ _LPP_OFFSET = __LC_LPP
clgfrl %r14,.Lrange_size\@
jhe \outside_label
.section .rodata, "a"
.align 4
.balign 4
.Lrange_size\@:
.long \end - \start
.previous
@ -488,7 +488,6 @@ SYM_FUNC_END(psw_idle)
* Machine check handler routines
*/
SYM_CODE_START(mcck_int_handler)
stckf __LC_MCCK_CLOCK
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
@ -598,8 +597,9 @@ SYM_CODE_START(restart_int_handler)
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
lctlg %c0,%c15,__LC_CREGS_SAVE_AREA
0: larl %r15,stosm_tmp
stosm 0(%r15),0x04 # turn dat on, keep irqs off
0: larl %r15,daton_psw
lpswe 0(%r15) # turn dat on, keep irqs off
.Ldaton:
lg %r15,__LC_RESTART_STACK
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@ -646,7 +646,11 @@ SYM_CODE_END(stack_overflow)
.balign 4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
SYM_DATA_LOCAL(stosm_tmp, .byte 0)
.balign 8
SYM_DATA_START_LOCAL(daton_psw)
.quad PSW_KERNEL_BITS
.quad .Ldaton
SYM_DATA_END(daton_psw)
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame


@ -36,5 +36,5 @@ SYM_CODE_START(startup_continue)
lpswe dw_psw-.(%r13) # load disabled wait psw
SYM_CODE_END(startup_continue)
.align 16
.balign 16
SYM_DATA_LOCAL(dw_psw, .quad 0x0002000180000000,0x0000000000000000)


@ -13,7 +13,7 @@
* would be in the data section instead.
*/
.section .kprobes.text, "ax"
.align 4096
.balign 4096
SYM_CODE_START(kprobes_insn_page)
.rept 2048
.word 0x07fe


@ -14,7 +14,7 @@ static int __init nobp_setup_early(char *str)
return rc;
if (enabled && test_facility(82)) {
/*
* The user explicitely requested nobp=1, enable it and
* The user explicitly requested nobp=1, enable it and
* disable the expoline support.
*/
__set_facility(82, alt_stfle_fac_list);


@ -172,9 +172,9 @@ static void cpum_cf_free_root(void)
cpu_cf_root.cfptr = NULL;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
on_each_cpu(cpum_cf_reset_cpu, NULL, 1);
debug_sprintf_event(cf_dbg, 4, "%s2 root.refcnt %u cfptr %px\n",
debug_sprintf_event(cf_dbg, 4, "%s root.refcnt %u cfptr %d\n",
__func__, refcount_read(&cpu_cf_root.refcnt),
cpu_cf_root.cfptr);
!cpu_cf_root.cfptr);
}
/*
@ -975,10 +975,6 @@ static int cfdiag_push_sample(struct perf_event *event,
}
overflow = perf_event_overflow(event, &data, &regs);
debug_sprintf_event(cf_dbg, 3,
"%s event %#llx sample_type %#llx raw %d ov %d\n",
__func__, event->hw.config,
event->attr.sample_type, raw.size, overflow);
if (overflow)
event->pmu->stop(event, 0);
@ -1105,10 +1101,6 @@ static int cpum_cf_online_cpu(unsigned int cpu)
{
int rc = 0;
debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d "
"opencnt %d\n", __func__, cpu,
refcount_read(&cpu_cf_root.refcnt),
refcount_read(&cfset_opencnt));
/*
* Ignore notification for perf_event_open().
* Handle only /dev/hwctr device sessions.
@ -1127,9 +1119,6 @@ static int cfset_offline_cpu(unsigned int cpu);
static int cpum_cf_offline_cpu(unsigned int cpu)
{
debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d opencnt %d\n",
__func__, cpu, refcount_read(&cpu_cf_root.refcnt),
refcount_read(&cfset_opencnt));
/*
* During task exit processing of grouped perf events triggered by CPU
* hotplug processing, pmu_disable() is called as part of perf context
@ -1337,8 +1326,6 @@ static void cfset_ioctl_off(void *parm)
cpuhw->state, S390_HWCTR_DEVICE, rc);
if (!cpuhw->dev_state)
cpuhw->flags &= ~PMU_F_IN_USE;
debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
__func__, rc, cpuhw->state, cpuhw->dev_state);
}
/* Start counter sets on particular CPU */
@ -1360,8 +1347,6 @@ static void cfset_ioctl_on(void *parm)
else
pr_err("Counter set start %#llx of /dev/%s failed rc=%i\n",
cpuhw->dev_state | cpuhw->state, S390_HWCTR_DEVICE, rc);
debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
__func__, rc, cpuhw->state, cpuhw->dev_state);
}
static void cfset_release_cpu(void *p)
@ -1369,8 +1354,6 @@ static void cfset_release_cpu(void *p)
struct cpu_cf_events *cpuhw = this_cpu_cfhw();
int rc;
debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n",
__func__, cpuhw->state, cpuhw->dev_state);
cpuhw->dev_state = 0;
rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
if (rc)
@ -1459,7 +1442,6 @@ static int cfset_all_start(struct cfset_request *req)
if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
rc = -EIO;
debug_sprintf_event(cf_dbg, 4, "%s CPUs missing", __func__);
}
free_cpumask_var(mask);
return rc;
@ -1516,8 +1498,6 @@ static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
if (put_user(cpus, &ctrset_read->no_cpus))
rc = -EFAULT;
out:
debug_sprintf_event(cf_dbg, 4, "%s rc %d copied %ld\n", __func__, rc,
uptr - (void __user *)ctrset_read->data);
return rc;
}
@ -1565,8 +1545,6 @@ static void cfset_cpu_read(void *parm)
cpuhw->used += space;
cpuhw->sets += 1;
}
debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__,
cpuhw->sets, cpuhw->used);
}
}
@ -1661,8 +1639,6 @@ static long cfset_ioctl_start(unsigned long arg, struct file *file)
if (!ret) {
cfset_session_add(preq);
file->private_data = preq;
debug_sprintf_event(cf_dbg, 4, "%s set %#lx need %ld ret %d\n",
__func__, preq->ctrset, need, ret);
} else {
kfree(preq);
}
@ -1761,8 +1737,6 @@ static int cfset_offline_cpu(unsigned int cpu)
static void cfdiag_read(struct perf_event *event)
{
debug_sprintf_event(cf_dbg, 3, "%s event %#llx count %ld\n", __func__,
event->attr.config, local64_read(&event->count));
}
static int get_authctrsets(void)
@ -1807,8 +1781,6 @@ static int cfdiag_event_init2(struct perf_event *event)
if (!event->hw.config_base)
err = -EINVAL;
debug_sprintf_event(cf_dbg, 5, "%s err %d config_base %#lx\n",
__func__, err, event->hw.config_base);
return err;
}


@ -22,7 +22,7 @@
#include <asm/irq.h>
#include <asm/debug.h>
#include <asm/timex.h>
#include <asm-generic/io.h>
#include <linux/io.h>
/* Minimum number of sample-data-block-tables:
* At least one table is required for the sampling buffer structure.
@ -43,7 +43,7 @@
#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8)
static inline int require_table_link(const void *sdbt)
{
return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
return ((unsigned long)sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
}
/* Minimum and maximum sampling buffer sizes:
@ -192,7 +192,7 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
if (is_link_entry(curr)) {
curr = get_next_sdbt(curr);
if (sdbt)
free_page((unsigned long) sdbt);
free_page((unsigned long)sdbt);
/* If the origin is reached, sampling buffer is freed */
if (curr == sfb->sdbt)
@ -278,7 +278,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
for (i = 0; i < num_sdb; i++) {
/* Allocate a new SDB-table if it is full. */
if (require_table_link(tail)) {
new = (unsigned long *) get_zeroed_page(gfp_flags);
new = (unsigned long *)get_zeroed_page(gfp_flags);
if (!new) {
rc = -ENOMEM;
break;
@ -304,7 +304,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
*/
if (tail_prev) {
sfb->num_sdbt--;
free_page((unsigned long) new);
free_page((unsigned long)new);
tail = tail_prev;
}
break;
@ -343,7 +343,7 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
return -EINVAL;
/* Allocate the sample-data-block-table origin */
sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!sfb->sdbt)
return -ENOMEM;
sfb->num_sdb = 0;
@ -594,11 +594,10 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
#define PMC_FAILURE 2
static void setup_pmc_cpu(void *flags)
{
int err;
struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
int err = 0;
err = 0;
switch (*((int *) flags)) {
switch (*((int *)flags)) {
case PMC_INIT:
memset(cpusf, 0, sizeof(*cpusf));
err = qsi(&cpusf->qsi);
@ -606,22 +605,18 @@ static void setup_pmc_cpu(void *flags)
break;
cpusf->flags |= PMU_F_RESERVED;
err = sf_disable();
if (err)
pr_err("Switching off the sampling facility failed "
"with rc %i\n", err);
break;
case PMC_RELEASE:
cpusf->flags &= ~PMU_F_RESERVED;
err = sf_disable();
if (err) {
pr_err("Switching off the sampling facility failed "
"with rc %i\n", err);
} else
if (!err)
deallocate_buffers(cpusf);
break;
}
if (err)
*((int *) flags) |= PMC_FAILURE;
if (err) {
*((int *)flags) |= PMC_FAILURE;
pr_err("Switching off the sampling facility failed with rc %i\n", err);
}
}
static void release_pmc_hardware(void)
@ -963,10 +958,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
return -ENOENT;
}
/* Check online status of the CPU to which the event is pinned */
if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
/* Force reset of idle/hv excludes regardless of what the
* user requested.
*/
@ -1026,8 +1017,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
err = lsctl(&cpuhw->lsctl);
if (err) {
cpuhw->flags &= ~PMU_F_ENABLED;
pr_err("Loading sampling controls failed: op %i err %i\n",
1, err);
pr_err("Loading sampling controls failed: op 1 err %i\n", err);
return;
}
@ -1061,8 +1051,7 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
err = lsctl(&inactive);
if (err) {
pr_err("Loading sampling controls failed: op %i err %i\n",
2, err);
pr_err("Loading sampling controls failed: op 2 err %i\n", err);
return;
}
@ -1221,7 +1210,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
te = trailer_entry_ptr((unsigned long)sdbt);
sample = (struct hws_basic_entry *)sdbt;
while ((unsigned long *) sample < (unsigned long *) te) {
while ((unsigned long *)sample < (unsigned long *)te) {
/* Check for an empty sample */
if (!sample->def || sample->LS)
break;
@ -1298,7 +1287,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
if (SAMPL_DIAG_MODE(&event->hw))
return;
sdbt = (unsigned long *) TEAR_REG(hwc);
sdbt = (unsigned long *)TEAR_REG(hwc);
done = event_overflow = sampl_overflow = num_sdb = 0;
while (!done) {
/* Get the trailer entry of the sample-data-block */
@ -1670,9 +1659,6 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
pr_err("The AUX buffer with %lu pages for the "
"diagnostic-sampling mode is full\n",
num_sdb);
debug_sprintf_event(sfdbg, 1,
"%s: AUX buffer used up\n",
__func__);
break;
}
if (WARN_ON_ONCE(!aux))
@ -1804,7 +1790,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
/* Allocate the first SDBT */
sfb->num_sdbt = 0;
sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!sfb->sdbt)
goto no_sdbt;
aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
@ -1816,7 +1802,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
*/
for (i = 0; i < nr_pages; i++, tail++) {
if (require_table_link(tail)) {
new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
new = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!new)
goto no_sdbt;
aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
@ -1865,7 +1851,7 @@ static void cpumsf_pmu_read(struct perf_event *event)
/* Nothing to do ... updates are interrupt-driven */
}
/* Check if the new sampling period/freqeuncy is appropriate.
/* Check if the new sampling period/frequency is appropriate.
*
* Return non-zero on error and zero on passed checks.
*/
@ -1973,8 +1959,8 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
if (!SAMPL_DIAG_MODE(&event->hw)) {
cpuhw->lsctl.tear = virt_to_phys(cpuhw->sfb.sdbt);
cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt;
cpuhw->lsctl.dear = *(unsigned long *)cpuhw->sfb.sdbt;
TEAR_REG(&event->hw) = (unsigned long)cpuhw->sfb.sdbt;
}
/* Ensure sampling functions are in the disabled state. If disabled,


@ -84,7 +84,7 @@ static int paiext_root_alloc(void)
/* The memory is already zeroed. */
paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
if (!paiext_root.mapptr) {
/* Returing without refcnt adjustment is ok. The
/* Returning without refcnt adjustment is ok. The
* error code is handled by paiext_alloc() which
* decrements refcnt when an event can not be
* created.
@ -190,7 +190,7 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
: PAI_MODE_COUNTING;
} else {
/* Multiple invocation, check whats active.
/* Multiple invocation, check what is active.
* Supported are multiple counter events or only one sampling
* event concurrently at any one time.
*/


@ -30,8 +30,8 @@
#include <linux/export.h>
#include <linux/init_task.h>
#include <linux/entry-common.h>
#include <linux/io.h>
#include <asm/cpu_mf.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>


@ -529,7 +529,7 @@ static void __init setup_resources(void)
res->start = start;
/*
* In memblock, end points to the first byte after the
* range while in resourses, end points to the last byte in
* range while in resources, end points to the last byte in
* the range.
*/
res->end = end - 1;


@ -113,7 +113,7 @@ early_param("smt", early_smt);
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
* member of a pcpu data structure within the pcpu_devices arreay.
* member of a pcpu data structure within the pcpu_devices array.
*/
DEFINE_MUTEX(smp_cpu_state_mutex);


@ -702,7 +702,7 @@ static void stp_work_fn(struct work_struct *work)
if (!check_sync_clock())
/*
* There is a usable clock but the synchonization failed.
* There is a usable clock but the synchronization failed.
* Retry after a second.
*/
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));


@ -19,6 +19,7 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_32 += -m31 -s
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
@ -40,8 +41,11 @@ KCSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
quiet_cmd_vdso_and_check = VDSO $@
cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,ld)
$(call if_changed,vdso_and_check)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S


@ -24,6 +24,7 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
@ -44,9 +45,12 @@ KCSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
quiet_cmd_vdso_and_check = VDSO $@
cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE
$(call if_changed,ld)
$(call if_changed,vdso_and_check)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S


@ -478,7 +478,7 @@ struct trans_exc_code_bits {
};
enum {
FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
FSI_STORE = 1, /* Exception was due to store operation */
FSI_FETCH = 2 /* Exception was due to fetch operation */
};
@ -625,7 +625,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* Returns: - zero on success; @gpa contains the resulting absolute address
* - a negative value if guest access failed due to e.g. broken
* guest mapping
* - a positve value if an access exception happened. In this case
* - a positive value if an access exception happened. In this case
* the returned value is the program interruption code as defined
* by the architecture
*/


@ -630,7 +630,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
/* process PER, also if the instrution is processed in user space */
/* process PER, also if the instruction is processed in user space */
if (vcpu->arch.sie_block->icptstatus & 0x02 &&
(!rc || rc == -EOPNOTSUPP))
per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);


@ -4161,7 +4161,7 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
vcpu->run->s.regs.fpc = 0;
/*
* Do not reset these registers in the protected case, as some of
* them are overlayed and they are not accessible in this case
* them are overlaid and they are not accessible in this case
* anyway.
*/
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {


@ -427,7 +427,7 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
/*
* Register device with the specified KVM. If interpetation facilities are
* Register device with the specified KVM. If interpretation facilities are
* available, enable them and let userspace indicate whether or not they will
* be used (specify SHM bit to disable).
*/


@ -13,7 +13,7 @@
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
#include <linux/io.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
@ -22,7 +22,6 @@
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>


@ -273,7 +273,7 @@ static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
uvcb.header.rc, uvcb.header.rrc);
WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
/* Inteded memory leak on "impossible" error */
/* Intended memory leak on "impossible" error */
if (!cc)
kvm_s390_pv_dealloc_vm(kvm);
return cc ? -EIO : 0;


@ -469,7 +469,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
*
* This interception will occur at the source cpu when a source cpu sends an
* external call to a target cpu and the target cpu has the WAIT bit set in
* its cpuflags. Interception will occurr after the interrupt indicator bits at
* its cpuflags. Interception will occur after the interrupt indicator bits at
* the target cpu have been set. All error cases will lead to instruction
* interception, therefore nothing is to be checked or prepared.
*/


@ -504,7 +504,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->mso = new_mso;
scb_s->prefix = new_prefix;
/* We have to definetly flush the tlb if this scb never ran */
/* We have to definitely flush the tlb if this scb never ran */
if (scb_s->ihcpu != 0xffffU)
scb_s->ihcpu = scb_o->ihcpu;
@ -901,7 +901,7 @@ static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
(vaddr & 0xfffffffffffff000UL) |
/* 52-53: store / fetch */
(((unsigned int) !write_flag) + 1) << 10,
/* 62-63: asce id (alway primary == 0) */
/* 62-63: asce id (always primary == 0) */
.exc_access_id = 0, /* always primary */
.op_access_id = 0, /* not MVPG */
};


@ -13,8 +13,8 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>
#include <asm/io.h>
int spin_retry = -1;


@ -1740,7 +1740,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
* The r2t parameter specifies the address of the source table. The
* four pages of the source table are made read-only in the parent gmap
* address space. A write to the source table area @r2t will automatically
* remove the shadow r2 table and all of its decendents.
* remove the shadow r2 table and all of its descendants.
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and


@ -13,9 +13,9 @@
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>


@ -481,6 +481,7 @@ static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
*/
static int vmem_add_range(unsigned long start, unsigned long size)
{
start = (unsigned long)__va(start);
return add_pagetable(start, start + size, true);
}
@ -489,6 +490,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
start = (unsigned long)__va(start);
remove_pagetable(start, start + size, true);
}
@ -556,7 +558,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
* to any physical address. If missing, allocate segment- and region-
* table entries along. Meeting a large segment- or region-table entry
* while traversing is an error, since the function is expected to be
* called against virtual regions reserverd for 4KB mappings only.
* called against virtual regions reserved for 4KB mappings only.
*/
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{


@ -523,12 +523,12 @@ extern const char bpf_plt_end[];
#define BPF_PLT_SIZE 32
asm(
".pushsection .rodata\n"
" .align 8\n"
" .balign 8\n"
"bpf_plt:\n"
" lgrl %r0,bpf_plt_ret\n"
" lgrl %r1,bpf_plt_target\n"
" br %r1\n"
" .align 8\n"
" .balign 8\n"
"bpf_plt_ret: .quad 0\n"
"bpf_plt_target: .quad 0\n"
"bpf_plt_end:\n"


@ -163,7 +163,7 @@ static void zpci_handle_cpu_local_irq(bool rescan)
if (!rescan || irqs_on++)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
/* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
break;
bit = 0;
@ -202,7 +202,7 @@ static void zpci_handle_fallback_irq(void)
if (irqs_on++)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
/* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
break;
cpu = 0;
@ -247,7 +247,7 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
if (irqs_on++)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
/* First scan complete, re-enable interrupts. */
if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
break;
si = 0;


@ -100,7 +100,7 @@ SYM_CODE_START(purgatory_start)
* checksum verification only (%r2 = 0 -> verification only).
*
* Check now and preserve over C function call by storing in
* %r10 whith
* %r10 with
* 1 -> checksum verification only
* 0 -> load new kernel
*/


@ -92,17 +92,6 @@ config ZCRYPT_DEBUG
If unsure, say N.
config ZCRYPT_MULTIDEVNODES
bool "Support for multiple zcrypt device nodes"
default y
depends on S390
depends on ZCRYPT
help
With this option enabled the zcrypt device driver can
provide multiple devices nodes in /dev. Each device
node can get customized to limit access and narrow
down the use of the available crypto hardware.
config PKEY
tristate "Kernel API for protected key handling"
depends on S390


@ -24,7 +24,7 @@
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>


@ -21,13 +21,13 @@
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>


@ -16,10 +16,10 @@
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"


@ -20,8 +20,8 @@
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/io.h>
#include <asm/extmem.h>
#include <asm/io.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1


@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
#include <asm/delay.h>


@ -22,8 +22,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>


@ -152,7 +152,7 @@ static ssize_t ccwgroup_online_show(struct device *dev,
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentially created. Saves memory :)
* longer needed or accidentally created. Saves memory :)
*/
static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{


@ -943,7 +943,7 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schib.pmcw.dev, rc);
if (old_enabled) {
/* Try to reenable the old subchannel. */
/* Try to re-enable the old subchannel. */
spin_lock_irq(old_sch->lock);
cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
spin_unlock_irq(old_sch->lock);


@ -310,7 +310,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */
/* Re-enable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
/* Save indication for new paths. */
cdev->private->path_new_mask = sch->vpm;
@ -947,7 +947,7 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
*/
sch->lpm = sch->schib.pmcw.pam & sch->opm;
/*
* Use the initial configuration since we can't be shure that the old
* Use the initial configuration since we can't be sure that the old
* paths are valid.
*/
io_subchannel_init_config(sch);


@ -672,7 +672,7 @@ out_init:
/*
* Fetch one ccw.
* To reduce memory copy, we'll pin the cda page in memory,
* and to get rid of the cda 2G limitiaion of ccw1, we'll translate
* and to get rid of the cda 2G limitation of ccw1, we'll translate
* direct ccws to idal ccws.
*/
static int ccwchain_fetch_one(struct ccw1 *ccw,
@ -787,7 +787,7 @@ void cp_free(struct channel_program *cp)
* program.
*
* These APIs will copy the ccws into kernel-space buffers, and update
* the guest phsical addresses with their corresponding host physical
* the guest physical addresses with their corresponding host physical
* addresses. Then channel I/O device drivers could issue the
* translated channel program to real devices to perform an I/O
* operation.


@ -497,7 +497,7 @@ static void ap_tasklet_fn(unsigned long dummy)
enum ap_sm_wait wait = AP_SM_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
* be received. Doing it in the beginning of the tasklet is therefor
* be received. Doing it in the beginning of the tasklet is therefore
* important that no requests on any AP get lost.
*/
if (ap_irq_flag)
@ -2289,7 +2289,7 @@ static int __init ap_module_init(void)
timer_setup(&ap_config_timer, ap_config_timeout, 0);
/*
* Setup the high resultion poll timer.
* Setup the high resolution poll timer.
* If we are running under z/VM adjust polling to z/VM polling rate.
*/
if (MACHINE_IS_VM)


@ -233,30 +233,6 @@ struct ap_queue {
typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
/* failure injection cmd struct */
struct ap_fi {
union {
u16 cmd; /* fi flags + action */
struct {
u8 flags; /* fi flags only */
u8 action; /* fi action only */
};
};
};
/* all currently known fi actions */
enum ap_fi_actions {
AP_FI_ACTION_CCA_AGENT_FF = 0x01,
AP_FI_ACTION_CCA_DOM_INVAL = 0x02,
AP_FI_ACTION_NQAP_QID_INVAL = 0x03,
};
/* all currently known fi flags */
enum ap_fi_flags {
AP_FI_FLAG_NO_RETRY = 0x01,
AP_FI_FLAG_TOGGLE_SPECIAL = 0x02,
};
struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long psmid; /* Message id. */
@ -264,7 +240,6 @@ struct ap_message {
size_t len; /* actual msg len in msg buffer */
size_t bufsize; /* allocated msg buffer size */
u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
struct ap_fi fi; /* Failure Injection cmd */
int rc; /* Return code for this message */
void *private; /* ap driver private pointer. */
/* receive is called from tasklet context */
@ -384,7 +359,7 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
* like "+1-16,-32,-0x40,+128" where only single bits or ranges of
* bits are cleared or set. Distinction is done based on the very
* first character which may be '+' or '-' for the relative string
* and othewise assume to be an absolute value string. If parsing fails
* and otherwise assume to be an absolute value string. If parsing fails
* a negative errno value is returned. All arguments and bitmaps are
* big endian order.
*/


@ -274,13 +274,6 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
__func__, ap_msg->fi.cmd);
qid = 0xFF00;
}
#endif
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);


@ -445,7 +445,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ setings: clear new configuration */
/* We could not modify IRQ settings: clear new configuration */
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
@ -524,7 +524,7 @@ static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
* Response.status may be set to following Response Code:
* - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
* - AP_RESPONSE_DECONFIGURED: if the queue is not configured
* - AP_RESPONSE_NORMAL (0) : in case of successs
* - AP_RESPONSE_NORMAL (0) : in case of success
* Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
* We take the matrix_dev lock to ensure serialization on queues and
* mediated device access.


@ -111,8 +111,6 @@ EXPORT_SYMBOL(zcrypt_msgtype);
* Multi device nodes extension functions.
*/
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
struct zcdn_device;
static struct class *zcrypt_class;
@ -477,8 +475,6 @@ static void zcdn_destroy_all(void)
mutex_unlock(&ap_perms_mutex);
}
#endif
/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
@ -510,7 +506,6 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
{
struct ap_perms *perms = &ap_perms;
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
@ -522,7 +517,6 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
if (zcdndev)
perms = &zcdndev->perms;
}
#endif
filp->private_data = (void *)perms;
atomic_inc(&zcrypt_open_count);
@ -536,7 +530,6 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
@ -549,7 +542,6 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
put_device(&zcdndev->device);
}
}
#endif
atomic_dec(&zcrypt_open_count);
return 0;
@ -661,11 +653,6 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
ap_init_message(&ap_msg);
#ifdef CONFIG_ZCRYPT_DEBUG
if (tr && tr->fi.cmd)
ap_msg.fi.cmd = tr->fi.cmd;
#endif
if (mex->outputdatalength < mex->inputdatalength) {
func_code = 0;
rc = -EINVAL;
@ -687,7 +674,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable accelarator or CCA card */
/* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
@ -771,11 +758,6 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
ap_init_message(&ap_msg);
#ifdef CONFIG_ZCRYPT_DEBUG
if (tr && tr->fi.cmd)
ap_msg.fi.cmd = tr->fi.cmd;
#endif
if (crt->outputdatalength < crt->inputdatalength) {
func_code = 0;
rc = -EINVAL;
@ -797,7 +779,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable accelarator or CCA card */
/* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
@ -883,16 +865,6 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
xcrb->status = 0;
ap_init_message(&ap_msg);
#ifdef CONFIG_ZCRYPT_DEBUG
if (tr && tr->fi.cmd)
ap_msg.fi.cmd = tr->fi.cmd;
if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
__func__, tr->fi.cmd);
xcrb->agent_ID = 0x4646;
}
#endif
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
@ -982,14 +954,6 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
#ifdef CONFIG_ZCRYPT_DEBUG
if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
__func__, tr->fi.cmd);
*domain = 99;
}
#endif
rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
@ -1058,11 +1022,6 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
ap_init_message(&ap_msg);
#ifdef CONFIG_ZCRYPT_DEBUG
if (tr && tr->fi.cmd)
ap_msg.fi.cmd = tr->fi.cmd;
#endif
target_num = (unsigned short)xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
@ -1473,23 +1432,10 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
if (mex.inputdatalength & (1U << 31)) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
}
mex.inputdatalength &= 0x0000FFFF;
#endif
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
if (rc == -EAGAIN)
tr.again_counter++;
#ifdef CONFIG_ZCRYPT_DEBUG
if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
break;
#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@ -1518,23 +1464,10 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
if (crt.inputdatalength & (1U << 31)) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
}
crt.inputdatalength &= 0x0000FFFF;
#endif
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
if (rc == -EAGAIN)
tr.again_counter++;
#ifdef CONFIG_ZCRYPT_DEBUG
if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
break;
#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1563,23 +1496,10 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
tr.fi.cmd = (u16)(xcrb.status >> 16);
}
xcrb.status = 0;
#endif
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
#ifdef CONFIG_ZCRYPT_DEBUG
if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
break;
#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1609,23 +1529,10 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
if (xcrb.req_len & (1ULL << 63)) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
tr.fi.cmd = (u16)(xcrb.req_len >> 48);
}
xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
#endif
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
#ifdef CONFIG_ZCRYPT_DEBUG
if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
break;
#endif
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
@@ -1668,14 +1575,16 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
size_t total_size = MAX_ZDEV_ENTRIES_EXT
* sizeof(struct zcrypt_device_status_ext);
device_status = kzalloc(total_size, GFP_KERNEL);
device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
kfree(device_status);
kvfree(device_status);
return rc;
}
case ZCRYPT_STATUS_MASK: {
@@ -2144,8 +2053,6 @@ void zcrypt_debug_exit(void)
debug_unregister(zcrypt_dbf_info);
}
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
static int __init zcdn_init(void)
{
int rc;
@@ -2203,8 +2110,6 @@ static void zcdn_exit(void)
class_destroy(zcrypt_class);
}
#endif
/*
* zcrypt_api_init(): Module initialization.
*
@@ -2218,11 +2123,9 @@ int __init zcrypt_api_init(void)
if (rc)
goto out;
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
rc = zcdn_init();
if (rc)
goto out;
#endif
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
@@ -2235,9 +2138,7 @@ int __init zcrypt_api_init(void)
return 0;
out_misc_register_failed:
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
zcdn_exit();
#endif
zcrypt_debug_exit();
out:
return rc;
@@ -2250,9 +2151,7 @@ out:
*/
void __exit zcrypt_api_exit(void)
{
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
zcdn_exit();
#endif
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();

@@ -60,9 +60,6 @@ struct zcrypt_track {
int again_counter; /* retry attempts counter */
int last_qid; /* last qid used */
int last_rc; /* last return code */
#ifdef CONFIG_ZCRYPT_DEBUG
struct ap_fi fi; /* failure injection cmd */
#endif
};
/* defines related to message tracking */

@@ -689,7 +689,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
goto out;
}
/* copy the tanslated protected key */
/* copy the translated protected key */
switch (prepparm->lv3.ckb.len) {
case 16 + 32:
/* AES 128 protected key */

@@ -115,7 +115,7 @@ struct eccprivkeytoken {
u64 mkvp; /* master key verification pattern */
u8 opk[48]; /* encrypted object protection key data */
u16 adatalen; /* associated data length in bytes */
u16 fseclen; /* formated section length in bytes */
u16 fseclen; /* formatted section length in bytes */
u8 more_data[]; /* more data follows */
} __packed;
@@ -232,7 +232,7 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
* the number of apqns stored into the list is returned in *nr_apqns. One apqn
* entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
* may be casted to struct pkey_apqn. The return value is either 0 for success
* or a negative errno value. If no apqn meeting the criterias is found,
* or a negative errno value. If no apqn meeting the criteria is found,
* -ENODEV is returned.
*/
int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,

@@ -1368,7 +1368,7 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
goto out;
}
/* copy the tanslated protected key */
/* copy the translated protected key */
if (wki->pkeysize > *protkeylen) {
DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
__func__, wki->pkeysize, *protkeylen);

@@ -131,14 +131,14 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
* - if minapi > 0 only apqns with API_ord_nr >= minapi
* - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
* to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
* key for this domain. When a wkvp is given there will aways be a re-fetch
* key for this domain. When a wkvp is given there will always be a re-fetch
* of the domain info for the potential apqn - so this triggers an request
* reply to each apqn eligible.
* The array of apqn entries is allocated with kmalloc and returned in *apqns;
* the number of apqns stored into the list is returned in *nr_apqns. One apqn
* entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
* may be casted to struct pkey_apqn. The return value is either 0 for success
* or a negative errno value. If no apqn meeting the criterias is found,
* or a negative errno value. If no apqn meeting the criteria is found,
* -ENODEV is returned.
*/
int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,

@@ -246,11 +246,6 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
copy_from_user(inp, mex->inputdata, mod_len))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
#endif
return 0;
}
@@ -338,11 +333,6 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
#endif
return 0;
}

@@ -425,11 +425,6 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
memcmp(function_code, "AU", 2) == 0)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
#ifdef CONFIG_ZCRYPT_DEBUG
if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
#endif
/* check CPRB minor version, set info bits in ap_message flag field */
switch (*(unsigned short *)(&msg->cprbx.func_id[0])) {
case 0x5432: /* "T2" */
@@ -535,11 +530,6 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
if (msg->cprbx.flags & 0x20)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
#ifdef CONFIG_ZCRYPT_DEBUG
if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
#endif
/* set info bits in ap_message flag field */
if (msg->cprbx.flags & 0x80)
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
@@ -1143,6 +1133,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
ap_cancel_message(zq->queue, ap_msg);
}
if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
rc = -EIO; /* do not retry administrative requests */
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
@@ -1263,6 +1256,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
ap_cancel_message(zq->queue, ap_msg);
}
if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
rc = -EIO; /* do not retry administrative requests */
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",

@@ -43,13 +43,13 @@
#include <linux/netdevice.h>
#include <net/dst.h>
#include <linux/io.h> /* instead of <asm/io.h> ok ? */
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/moduleparam.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/idals.h>
#include "ctcm_main.h"

@@ -47,7 +47,7 @@
#include <linux/ctype.h>
#include <net/dst.h>
#include <asm/io.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>