ARC: Fix typos

Fix typos, most reported by "codespell arch/arc".  Only touches comments,
no code changes.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>

Bjorn Helgaas authored 2024-03-29 17:14:32 -05:00; committed by Vineet Gupta
parent d5272aaa82
commit ebfc2fd887
25 changed files with 45 additions and 44 deletions

@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# uImage build relies on mkimage being availble on your host for ARC target
# uImage build relies on mkimage being available on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
# and make sure it's reacable from your PATH
# and make sure it's reachable from your PATH
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S

@@ -119,9 +119,9 @@
/*
* The DW APB ICTL intc on MB is connected to CPU intc via a
* DT "invisible" DW APB GPIO block, configured to simply pass thru
* interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
* interrupts - setup accordingly in platform init (plat-axs10x/ax10x.c)
*
* So here we mimic a direct connection betwen them, ignoring the
* So here we mimic a direct connection between them, ignoring the
* ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
* instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
*

@@ -113,7 +113,7 @@
/*
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
*
* This node is intentionally put outside of MB above becase
* This node is intentionally put outside of MB above because
* it maps areas outside of MB's 0xez-0xfz.
*/
uio_ev: uio@d0000000 {

@@ -12,7 +12,7 @@
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
* structure fields name must correspond to aux register defenitions for
* structure fields name must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {

@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
* it's prologue including stack switching from user mode
* its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
* Thay way although L2 IRQ happened in Kernel mode, stack is still
* That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
/* With current tsk in r9, get it's kernel mode stack base */
/* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
* for memory load operations. If used in that way interrupts are deffered
* for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
* for memory load operations. If used in that way interrupts are deffered
* for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL

@@ -7,7 +7,7 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
#include <asm/unistd.h> /* For NR_syscalls defination */
#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
@@ -56,7 +56,7 @@
.endm
/*-------------------------------------------------------------
* given a tsk struct, get to the base of it's kernel mode stack
* given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
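
A rough C rendering of what GET_TSK_STACK_BASE computes may help; this is an illustrative sketch only (the real macro is assembler), using the generic kernel names task_thread_info() and THREAD_SIZE, with the helper name invented for the example:

/*
 * Sketch: the kernel-mode stack base is the far end of the page that
 * also holds thread_info, i.e. thread_info + THREAD_SIZE.
 * Assumes <linux/sched.h> and <linux/sched/task_stack.h>.
 */
static inline unsigned long sketch_tsk_stack_base(struct task_struct *tsk)
{
        return (unsigned long)task_thread_info(tsk) + THREAD_SIZE;
}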

@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
* This doesnt affect ARCompact, but we change it to same value
* This doesn't affect ARCompact, but we change it to same value
*/
#define NR_IRQS 512

@@ -46,7 +46,7 @@
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
* ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
* ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
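
As a concrete illustration of that "memory" clobber acting as a compiler barrier, an IRQ-save helper in the ARCv2 style looks roughly like the sketch below (the function name is invented for the example; this is not presented as the exact kernel code):

/*
 * Sketch: disable interrupts and return the previous state.  The
 * "memory" clobber tells the compiler that memory may have changed,
 * so loads/stores around the critical section are not kept in
 * registers and get redone once IRQs are re-enabled.
 */
static inline unsigned long sketch_local_irq_save(void)
{
        unsigned long flags;

        __asm__ __volatile__("  clri %0 \n"     /* ARCv2: disable IRQs, old state in %0 */
                             : "=r" (flags)
                             :
                             : "memory");
        return flags;
}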

@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
* there is a good chance that task gets sched-out/in, making it's ASID valid
* there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/

@@ -66,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
* can be tracked independet of X/W unlike some other CPUs), still to
* can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
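
In practice the two implications are simply folded in when the protection bits are composed, roughly as in the sketch below (the VM_* flags are the generic kernel ones; the _PAGE_* names and the function are illustrative, not the exact ARC definitions):

/* Sketch: W => R and X => R folded into the page protection bits. */
static unsigned long sketch_vmflags_to_prot(unsigned long vm_flags)
{
        unsigned long prot = 0;

        if (vm_flags & VM_READ)
                prot |= _PAGE_READ;
        if (vm_flags & VM_WRITE)
                prot |= _PAGE_WRITE | _PAGE_READ;       /* Write implies Read */
        if (vm_flags & VM_EXEC)
                prot |= _PAGE_EXECUTE | _PAGE_READ;     /* Execute implies Read */

        return prot;
}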

@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
/* Handle upto 2 cache bins */
/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */

@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
* Originally Interrupts had to be disabled around code to gaurantee atomicity.
* Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
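
For reference, the LLOCK/SCOND retry pattern mentioned above is a loop along these lines (a sketch of the idiom with an invented helper name, not the kernel's exact atomic op):

/*
 * Sketch: LLOCK marks the word for exclusive access, SCOND stores only
 * if nothing intervened (success sets the Z flag), and the loop retries
 * otherwise.
 */
static inline void sketch_atomic_add(int i, int *counter)
{
        unsigned int val;

        __asm__ __volatile__(
        "1:     llock   %[val], [%[ctr]]        \n"
        "       add     %[val], %[val], %[i]    \n"
        "       scond   %[val], [%[ctr]]        \n"
        "       bnz     1b                      \n"
        : [val] "=&r" (val)
        : [ctr] "r" (counter), [i] "ir" (i)
        : "cc", "memory");
}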

@@ -38,7 +38,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
int preempt_count; /* 0 => preemptable, <0 => BUG */
int preempt_count; /* 0 => preemptible, <0 => BUG */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */

@@ -62,7 +62,7 @@
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
* (1) It is portable to any architecure
* (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
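
The kind of portable C bit-search being referred to can be written, for example, as the fls() sketch below: plain C that builds on any architecture while still letting the compiler lower it to native bit instructions. This is only an illustration, not the kernel's actual helper:

/* Sketch: fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0 */
static inline int sketch_fls(unsigned int x)
{
        int n = 32;

        if (!x)
                return 0;
        if (!(x & 0xffff0000u)) { x <<= 16; n -= 16; }
        if (!(x & 0xff000000u)) { x <<= 8;  n -= 8;  }
        if (!(x & 0xf0000000u)) { x <<= 4;  n -= 4;  }
        if (!(x & 0xc0000000u)) { x <<= 2;  n -= 2;  }
        if (!(x & 0x80000000u)) { n -= 1; }
        return n;
}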

@@ -5,7 +5,7 @@
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */
#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
#include <asm/errno.h>
#include <asm/arcregs.h>
@@ -31,7 +31,7 @@ VECTOR res_service ; Reset Vector
VECTOR mem_service ; Mem exception
VECTOR instr_service ; Instrn Error
VECTOR EV_MachineCheck ; Fatal Machine check
VECTOR EV_TLBMissI ; Intruction TLB miss
VECTOR EV_TLBMissI ; Instruction TLB miss
VECTOR EV_TLBMissD ; Data TLB miss
VECTOR EV_TLBProtV ; Protection Violation
VECTOR EV_PrivilegeV ; Privilege Violation
@@ -76,11 +76,11 @@ ENTRY(handle_interrupt)
# query in hard ISR path would return false (since .IE is set) which would
# trips genirq interrupt handling asserts.
#
# So do a "soft" disable of interrutps here.
# So do a "soft" disable of interrupts here.
#
# Note this disable is only for consistent book-keeping as further interrupts
# will be disabled anyways even w/o this. Hardware tracks active interrupts
# seperately in AUX_IRQ_ACT.active and will not take new interrupts
# separately in AUX_IRQ_ACT.active and will not take new interrupts
# unless this one returns (or higher prio becomes pending in 2-prio scheme)
IRQ_DISABLE

@@ -95,7 +95,7 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
; MC excpetions disable MMU
; MC exceptions disable MMU
ARC_MMU_REENABLE r3
lsr r3, r10, 8
@@ -209,7 +209,7 @@ trap_with_param:
; ---------------------------------------------
; syscall TRAP
; ABI: (r0-r7) upto 8 args, (r8) syscall number
; ABI: (r0-r7) up to 8 args, (r8) syscall number
; ---------------------------------------------
ENTRY(EV_Trap)

@@ -165,7 +165,7 @@ ENTRY(first_lines_of_secondary)
; setup stack (fp, sp)
mov fp, 0
; set it's stack base to tsk->thread_info bottom
; set its stack base to tsk->thread_info bottom
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary

@@ -56,7 +56,7 @@ void arc_init_IRQ(void)
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
/*
* ARCv2 core intc provides multiple interrupt priorities (upto 16).
* ARCv2 core intc provides multiple interrupt priorities (up to 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)

@@ -38,7 +38,7 @@
* (based on a specific RTL build)
* Below is the static map between perf generic/arc specific event_id and
* h/w condition names.
* At the time of probe, we loop thru each index and find it's name to
* At the time of probe, we loop thru each index and find its name to
* complete the mapping of perf event_id to h/w index as latter is needed
* to program the counter really
*/

@@ -390,7 +390,7 @@ static void arc_chk_core_config(struct cpuinfo_arc *info)
#ifdef CONFIG_ARC_HAS_DCCM
/*
* DCCM can be arbit placed in hardware.
* Make sure it's placement/sz matches what Linux is built with
* Make sure its placement/sz matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");

@@ -8,15 +8,16 @@
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() supports TIF_RESTORE_SIGMASK
* -do_signal() no loner needs oldset, required by OLD sys_sigsuspend
* -sys_rt_sigsuspend() now comes from generic code, so discard arch implemen
* -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
* -sys_rt_sigsuspend() now comes from generic code, so discard arch
* implementation
* -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
* -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
* the job to do_signal()
*
* vineetg: July 2009
* -Modified Code to support the uClibc provided userland sigreturn stub
* to avoid kernel synthesing it on user stack at runtime, costing TLB
* to avoid kernel synthesizing it on user stack at runtime, costing TLB
* probes and Cache line flushes.
*
* vineetg: July 2009

@@ -89,7 +89,7 @@ int do_misaligned_access(unsigned long address, struct pt_regs *regs,
/*
* Entry point for miscll errors such as Nested Exceptions
* -Duplicate TLB entry is handled seperately though
* -Duplicate TLB entry is handled separately though
*/
void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
{

@@ -41,8 +41,8 @@ SECTIONS
#endif
/*
* The reason for having a seperate subsection .init.ramfs is to
* prevent objump from including it in kernel dumps
* The reason for having a separate subsection .init.ramfs is to
* prevent objdump from including it in kernel dumps
*
* Reason for having .init.ramfs above .init is to make sure that the
* binary blob is tucked away to one side, reducing the displacement

@@ -212,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
* its better to move to a new ASID rather than searching for
* it's better to move to a new ASID rather than searching for
* individual entries and then shooting them down
*
* The calc above is rough, doesn't account for unaligned parts,
@@ -408,7 +408,7 @@ static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *p
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
* Lets see the use cases when current->mm != vma->mm and we land here
* Let's see the use cases when current->mm != vma->mm and we land here
* 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
* Here VM wants to pre-install a TLB entry for user stack while
* current->mm still points to pre-execve mm (hence the condition).

@@ -5,19 +5,19 @@
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: April 2011 :
* -MMU v1: moved out legacy code into a seperate file
* -MMU v1: moved out legacy code into a separate file
* -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
* helps avoid a shift when preparing PD0 from PTE
*
* Vineetg: July 2009
* -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
* entry, so that it doesn't knock out it's I-TLB entry
* -For MMU V2, we need not do heuristics at the time of committing a D-TLB
* entry, so that it doesn't knock out its I-TLB entry
* -Some more fine tuning:
* bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
*
* Vineetg: July 2009
* -Practically rewrote the I/D TLB Miss handlers
* Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
* Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
* Hence Leaner by 1.5 K
* Used Conditional arithmetic to replace excessive branching
* Also used short instructions wherever possible