x86: Fix various typos in comments, take #2

Fix another ~42 single-word typos in arch/x86/ code comments; a few were
missed in the first pass, in particular in .S files.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-kernel@vger.kernel.org
Ingo Molnar 2021-03-21 22:28:53 +01:00
parent c681df88dc
commit 163b099146
41 changed files with 42 additions and 42 deletions


@@ -5,7 +5,7 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI serivce calls,
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
  * since the firmware's 32-bit IDT is still currently installed and it
  * needs to be able to service interrupts.
  *


@@ -231,7 +231,7 @@ SYM_FUNC_START(startup_32)
 /*
  * Setup for the jump to 64bit mode
  *
- * When the jump is performend we will be in long mode but
+ * When the jump is performed we will be in long mode but
  * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
  * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
  * the new gdt/idt that has __KERNEL_CS with CS.L = 1.


@@ -24,7 +24,7 @@
 /*
  * Copyright 2012 Xyratex Technology Limited
  *
- * Wrappers for kernel crypto shash api to pclmulqdq crc32 imlementation.
+ * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
  */
 #include <linux/init.h>
 #include <linux/module.h>


@@ -88,7 +88,7 @@
 /*
  * Combined G1 & G2 function. Reordered with help of rotates to have moves
- * at begining.
+ * at beginning.
  */
 #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
 /* G1,1 && G2,1 */ \


@@ -209,7 +209,7 @@
  *
  * Lets build a 5 entry IRET frame after that, such that struct pt_regs
  * is complete and in particular regs->sp is correct. This gives us
- * the original 6 enties as gap:
+ * the original 6 entries as gap:
  *
  * 14*4(%esp) - <previous context>
  * 13*4(%esp) - gap / flags


@@ -511,7 +511,7 @@ SYM_CODE_START(\asmsym)
 /*
  * No need to switch back to the IST stack. The current stack is either
  * identical to the stack in the IRET frame or the VC fall-back stack,
- * so it is definitly mapped even with PTI enabled.
+ * so it is definitely mapped even with PTI enabled.
  */
 jmp paranoid_exit


@@ -218,7 +218,7 @@ int main(int argc, char **argv)
 /*
  * Figure out the struct name. If we're writing to a .so file,
- * generate raw output insted.
+ * generate raw output instead.
  */
 name = strdup(argv[3]);
 namelen = strlen(name);


@@ -29,7 +29,7 @@ __kernel_vsyscall:
  * anyone with an AMD CPU, for example). Nonetheless, we try to keep
  * it working approximately as well as it ever worked.
  *
- * This link may eludicate some of the history:
+ * This link may elucidate some of the history:
  * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
  * personally, I find it hard to understand what's going on there.
  *


@@ -358,7 +358,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 mmap_write_lock(mm);
 /*
  * Check if we have already mapped vdso blob - fail to prevent
- * abusing from userspace install_speciall_mapping, which may
+ * abusing from userspace install_special_mapping, which may
  * not do accounting and rlimit right.
  * We could search vma near context.vdso, but it's a slowpath,
  * so let's explicitly check all VMAs to be completely sure.


@@ -137,7 +137,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 /*
  * If the return from callback is zero or negative, return immediately,
- * else re-execute ENCLU with the postive return value interpreted as
+ * else re-execute ENCLU with the positive return value interpreted as
  * the requested ENCLU function.
  */
 cmp $0, %eax


@@ -594,7 +594,7 @@ static __init int bts_init(void)
  * we cannot use the user mapping since it will not be available
  * if we're not running the owning process.
  *
- * With PTI we can't use the kernal map either, because its not
+ * With PTI we can't use the kernel map either, because its not
  * there when we run userspace.
  *
  * For now, disable this driver when using PTI.


@@ -2776,7 +2776,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
  * processing loop coming after that the function, otherwise
  * phony regular samples may be generated in the sampling buffer
  * not marked with the EXACT tag. Another possibility is to have
- * one PEBS event and at least one non-PEBS event whic hoverflows
+ * one PEBS event and at least one non-PEBS event which overflows
  * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
  * not be set, yet the overflow status bit for the PEBS counter will
  * be on Skylake.


@@ -1313,7 +1313,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 .get_event_constraints = x86_get_event_constraints,
 /*
  * IF HT disabled we may need to use all
- * ARCH_P4_MAX_CCCR counters simulaneously
+ * ARCH_P4_MAX_CCCR counters simultaneously
  * though leave it restricted at moment assuming
  * HT is on
  */


@@ -9,7 +9,7 @@
  * Functions to keep the agpgart mappings coherent with the MMU. The
  * GART gives the CPU a physical alias of pages in memory. The alias
  * region is mapped uncacheable. Make sure there are no conflicting
- * mappings with different cachability attributes for the same
+ * mappings with different cacheability attributes for the same
  * page. This avoids data corruption on some CPUs.
  */


@@ -3,7 +3,7 @@
 #define _ASM_X86_INTEL_PT_H
 #define PT_CPUID_LEAVES 2
-#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
+#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */
 enum pt_capabilities {
 PT_CAP_max_subleaf = 0,


@@ -8,7 +8,7 @@
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
  * address range. The attributes include:
- * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
+ * Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack
  * Executability : eXecutable, NoteXecutable
  * Read/Write : ReadOnly, ReadWrite
  * Presence : NotPresent


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Shared support code for AMD K8 northbridges and derivates.
+ * Shared support code for AMD K8 northbridges and derivatives.
  * Copyright 2006 Andi Kleen, SUSE Labs.
  */


@@ -1025,7 +1025,7 @@ static int apm_enable_power_management(int enable)
  * status which gives the rough battery status, and current power
  * source. The bat value returned give an estimate as a percentage
  * of life and a status value for the battery. The estimated life
- * if reported is a lifetime in secodnds/minutes at current power
+ * if reported is a lifetime in seconds/minutes at current power
  * consumption.
  */


@@ -301,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
  * The operating system must reload CR3 to cause the TLB to be flushed"
  *
  * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
- * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+ * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
  * to be modified.
  */
 if (c->x86 == 5 && c->x86_model == 9) {


@@ -142,7 +142,7 @@ static struct severity {
 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
 ),
 MCESEV(
-KEEP, "Non signalled machine check",
+KEEP, "Non signaled machine check",
 SER, BITCLR(MCI_STATUS_S)
 ),


@@ -799,7 +799,7 @@ void mtrr_ap_init(void)
  *
  * This routine is called in two cases:
  *
- * 1. very earily time of software resume, when there absolutely
+ * 1. very early time of software resume, when there absolutely
  * isn't mtrr entry changes;
  *
  * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug


@@ -397,7 +397,7 @@ void mon_event_count(void *info)
  * timer. Having 1s interval makes the calculation of bandwidth simpler.
  *
  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid uncecessarily restricting
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
  * the L2 <-> L3 traffic.
  *
  * Since MBA controls the L2 external bandwidth where as MBM measures the
@@ -480,7 +480,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 /*
  * Delta values are updated dynamically package wise for each
- * rdtgrp everytime the throttle MSR changes value.
+ * rdtgrp every time the throttle MSR changes value.
  *
  * This is because (1)the increase in bandwidth is not perfectly
  * linear and only "approximately" linear even when the hardware


@@ -2555,7 +2555,7 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
 /*
  * This creates a directory mon_data which contains the monitored data.
  *
- * mon_data has one directory for each domain whic are named
+ * mon_data has one directory for each domain which are named
  * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
  * with L3 domain looks as below:
  * ./mon_data:


@@ -107,7 +107,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
  * - Write protect disabled
  * - No task switch
  * - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
  */
 movl %cr0, %eax
 andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax


@@ -121,7 +121,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
  * - Write protect disabled
  * - No task switch
  * - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
  */
 movq %cr0, %rax
 andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax


@@ -204,7 +204,7 @@ static void native_stop_other_cpus(int wait)
 }
 /*
  * Don't wait longer than 10 ms if the caller didn't
- * reqeust it. If wait is true, the machine hangs here if
+ * request it. If wait is true, the machine hangs here if
  * one or more CPUs do not reach shutdown state.
  */
 timeout = USEC_PER_MSEC * 10;


@@ -472,7 +472,7 @@ void check_tsc_sync_target(void)
 /*
  * Add the result to the previous adjustment value.
  *
- * The adjustement value is slightly off by the overhead of the
+ * The adjustment value is slightly off by the overhead of the
  * sync mechanism (observed values are ~200 TSC cycles), but this
  * really depends on CPU, node distance and frequency. So
  * compensating for this is hard to get right. Experiments show


@@ -272,7 +272,7 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
  * by whether the operand is a register or a memory location.
  * If operand is a register, return as many bytes as the operand
  * size. If operand is memory, return only the two least
- * siginificant bytes.
+ * significant bytes.
  */
 if (X86_MODRM_MOD(insn->modrm.value) == 3)
 *data_size = insn->opnd_bytes;


@@ -727,7 +727,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 struct amd_svm_iommu_ir *ir;
 /**
- * In some cases, the existing irte is updaed and re-set,
+ * In some cases, the existing irte is updated and re-set,
  * so we need to check here if it's already been * added
  * to the ir_list.
  */


@@ -3537,7 +3537,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
  * snapshot restore (migration).
  *
  * In this flow, it is assumed that vmcs12 cache was
- * trasferred as part of captured nVMX state and should
+ * transferred as part of captured nVMX state and should
  * therefore not be read from guest memory (which may not
  * exist on destination host yet).
  */


@@ -964,7 +964,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
 /* The return value (in eax) is zero if the result is exact,
 if bits are changed due to rounding, truncation, etc, then
 a non-zero value is returned */
-/* Overflow is signalled by a non-zero return value (in eax).
+/* Overflow is signaled by a non-zero return value (in eax).
 In the case of overflow, the returned significand always has the
 largest possible value */
 int FPU_round_to_int(FPU_REG *r, u_char tag)


@@ -575,7 +575,7 @@ Normalise_result:
 #ifdef PECULIAR_486
 /*
  * This implements a special feature of 80486 behaviour.
- * Underflow will be signalled even if the number is
+ * Underflow will be signaled even if the number is
  * not a denormal after rounding.
  * This difference occurs only for masked underflow, and not
  * in the unmasked case.


@@ -1497,7 +1497,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
  * userspace task is trying to access some valid (from guest's point of
  * view) memory which is not currently mapped by the host (e.g. the
  * memory is swapped out). Note, the corresponding "page ready" event
- * which is injected when the memory becomes available, is delived via
+ * which is injected when the memory becomes available, is delivered via
  * an interrupt mechanism and not a #PF exception
  * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
  *


@@ -756,7 +756,7 @@ void __init init_mem_mapping(void)
 #ifdef CONFIG_X86_64
 if (max_pfn > max_low_pfn) {
-/* can we preseve max_low_pfn ?*/
+/* can we preserve max_low_pfn ?*/
 max_low_pfn = max_pfn;
 }
 #else


@@ -128,7 +128,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
 /*
  * Called from the FPU code when creating a fresh set of FPU
  * registers. This is called from a very specific context where
- * we know the FPU regstiers are safe for use and we can use PKRU
+ * we know the FPU registers are safe for use and we can use PKRU
  * directly.
  */
 void copy_init_pkru_to_fpregs(void)


@@ -441,7 +441,7 @@ void __init efi_free_boot_services(void)
  * 1.4.4 with SGX enabled booting Linux via Fedora 24's
  * grub2-efi on a hard disk. (And no, I don't know why
  * this happened, but Linux should still try to boot rather
- * panicing early.)
+ * panicking early.)
  */
 rm_size = real_mode_size_needed();
 if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {


@@ -27,7 +27,7 @@ static bool lid_wake_on_close;
  * wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
  *
  * We provide here a sysfs attribute that will additionally enable
- * wake-on-close behavior. This is useful (e.g.) when we oportunistically
+ * wake-on-close behavior. This is useful (e.g.) when we opportunistically
  * suspend with the display running; if the lid is then closed, we want to
  * wake up to turn the display off.
  *


@@ -131,7 +131,7 @@ void * __init prom_early_alloc(unsigned long size)
 const size_t chunk_size = max(PAGE_SIZE, size);
 /*
- * To mimimize the number of allocations, grab at least
+ * To minimize the number of allocations, grab at least
  * PAGE_SIZE of memory (that's an arbitrary choice that's
  * fast enough on the platforms we care about while minimizing
  * wasted bootmem) and hand off chunks of it to callers.


@@ -321,7 +321,7 @@ int hibernate_resume_nonboot_cpu_disable(void)
 /*
  * When bsp_check() is called in hibernate and suspend, cpu hotplug
- * is disabled already. So it's unnessary to handle race condition between
+ * is disabled already. So it's unnecessary to handle race condition between
  * cpumask query and cpu hotplug.
  */
 static int bsp_check(void)


@@ -103,7 +103,7 @@ static void __init setup_real_mode(void)
 *ptr += phys_base;
 }
-/* Must be perfomed *after* relocation. */
+/* Must be performed *after* relocation. */
 trampoline_header = (struct trampoline_header *)
 __va(real_mode_header->trampoline_header);


@@ -2410,7 +2410,7 @@ int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
 rmd.prot = prot;
 /*
  * We use the err_ptr to indicate if there we are doing a contiguous
- * mapping or a discontigious mapping.
+ * mapping or a discontiguous mapping.
  */
 rmd.contiguous = !err_ptr;
 rmd.no_translate = no_translate;