Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial

Pull trivial tree updates from Jiri Kosina.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (21 commits)
  gitignore: fix wording
  mfd: ab8500-debugfs: fix "between" in printk
  memstick: trivial fix of spelling mistake on management
  cpupowerutils: bench: fix "average"
  treewide: Fix typos in printk
  IB/mlx4: printk fix
  pinctrl: sirf/atlas7: fix printk spelling
  serial: mctrl_gpio: Grammar s/lines GPIOs/line GPIOs/, /sets/set/
  w1: comment spelling s/minmum/minimum/
  Blackfin: comment spelling s/divsor/divisor/
  metag: Fix misspellings in comments.
  ia64: Fix misspellings in comments.
  hexagon: Fix misspellings in comments.
  tools/perf: Fix misspellings in comments.
  cris: Fix misspellings in comments.
  c6x: Fix misspellings in comments.
  blackfin: Fix misspelling of 'register' in comment.
  avr32: Fix misspelling of 'definitions' in comment.
  treewide: Fix typos in printk
  Doc: treewide : Fix typos in DocBook/filesystem.xml
  ...
Linus Torvalds 2016-05-17 17:05:30 -07:00
commit 16bf834805
64 changed files with 86 additions and 86 deletions

.gitignore

@ -62,7 +62,7 @@ Module.symvers
/tar-install/
#
# git files that we don't want to ignore even it they are dot-files
# git files that we don't want to ignore even if they are dot-files
#
!.gitignore
!.mailmap


@ -1,5 +1,5 @@
/*
* Defitions for the address spaces of the AVR32 CPUs. Heavily based on
* Definitions for the address spaces of the AVR32 CPUs. Heavily based on
* include/asm-sh/addrspace.h
*
* Copyright (C) 2004-2006 Atmel Corporation


@ -154,7 +154,7 @@ ENTRY(___udivsi3)
CC = R7 < 0; /* Check quotient(AQ) */
/* If AQ==0, we'll sub divisor */
IF CC R5 = R1; /* and if AQ==1, we'll add it. */
R3 = R3 + R5; /* Add/sub divsor to partial remainder */
R3 = R3 + R5; /* Add/sub divisor to partial remainder */
R7 = R3 ^ R1; /* Generate next quotient bit */
R5 = R7 >> 31; /* Get AQ */


@ -2689,7 +2689,7 @@
#define L2CTL0_STAT 0xFFCA3010 /* L2CTL0 L2 Status Register */
#define L2CTL0_RPCR 0xFFCA3014 /* L2CTL0 L2 Read Priority Count Register */
#define L2CTL0_WPCR 0xFFCA3018 /* L2CTL0 L2 Write Priority Count Register */
#define L2CTL0_RFA 0xFFCA3024 /* L2CTL0 L2 Refresh Address Regsiter */
#define L2CTL0_RFA 0xFFCA3024 /* L2CTL0 L2 Refresh Address Register */
#define L2CTL0_ERRADDR0 0xFFCA3040 /* L2CTL0 L2 Bank 0 ECC Error Address Register */
#define L2CTL0_ERRADDR1 0xFFCA3044 /* L2CTL0 L2 Bank 1 ECC Error Address Register */
#define L2CTL0_ERRADDR2 0xFFCA3048 /* L2CTL0 L2 Bank 2 ECC Error Address Register */


@ -101,7 +101,7 @@ struct clk {
#define CLK_PLL BIT(2) /* PLL-derived clock */
#define PRE_PLL BIT(3) /* source is before PLL mult/div */
#define FIXED_DIV_PLL BIT(4) /* fixed divisor from PLL */
#define FIXED_RATE_PLL BIT(5) /* fixed ouput rate PLL */
#define FIXED_RATE_PLL BIT(5) /* fixed output rate PLL */
#define MAX_PLL_SYSCLKS 16


@ -145,7 +145,7 @@ static void cache_block_operation(unsigned int *start,
spin_lock_irqsave(&cache_lock, flags);
/*
* If another cache operation is occuring
* If another cache operation is occurring
*/
if (unlikely(imcr_get(wc_reg))) {
spin_unlock_irqrestore(&cache_lock, flags);


@ -212,7 +212,7 @@ static struct mtd_info *probe_cs(struct map_info *map_cs)
/*
* Probe each chip select individually for flash chips. If there are chips on
* both cse0 and cse1, the mtd_info structs will be concatenated to one struct
* so that MTD partitions can cross chip boundries.
* so that MTD partitions can cross chip boundaries.
*
* The only known restriction to how you can mount your chips is that each
* chip select must hold similar flash chips. But you need external hardware


@ -246,7 +246,7 @@ static struct mtd_info *probe_cs(struct map_info *map_cs)
/*
* Probe each chip select individually for flash chips. If there are chips on
* both cse0 and cse1, the mtd_info structs will be concatenated to one struct
* so that MTD partitions can cross chip boundries.
* so that MTD partitions can cross chip boundaries.
*
* The only known restriction to how you can mount your chips is that each
* chip select must hold similar flash chips. But you need external hardware


@ -525,7 +525,7 @@ static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_
return 0;
}
/* Map the ouput length of the transform to operation output starting on the inject index. */
/* Map the output length of the transform to operation output starting on the inject index. */
static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
{
int err = 0;


@ -11,7 +11,7 @@
*/
/* Just to be certain the config file is included, we include it here
* explicitely instead of depending on it being included in the file that
* explicitly instead of depending on it being included in the file that
* uses this code.
*/


@ -11,7 +11,7 @@
*/
/* Just to be certain the config file is included, we include it here
* explicitely instead of depending on it being included in the file that
* explicitly instead of depending on it being included in the file that
* uses this code.
*/


@ -237,7 +237,7 @@ static inline long __vmintop_clear(long i)
/*
* The initial program gets to find a system environment descriptor
* on its stack when it begins exection. The first word is a version
* on its stack when it begins execution. The first word is a version
* code to indicate what is there. Zero means nothing more.
*/


@ -78,7 +78,7 @@
#define __HEXAGON_C_WB_L2 0x7 /* Write-back, with L2 */
/*
* This can be overriden, but we're defaulting to the most aggressive
* This can be overridden, but we're defaulting to the most aggressive
* cache policy, the better to find bugs sooner.
*/


@ -236,9 +236,9 @@ static struct notifier_block kgdb_notifier = {
};
/**
* kgdb_arch_init - Perform any architecture specific initalization.
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initalization of any architecture
* This function will handle the initialization of any architecture
* specific callbacks.
*/
int kgdb_arch_init(void)


@ -26,7 +26,7 @@
* could be, and perhaps some day will be, handled as in-line
* macros, but for tracing/debugging it's handy to have
* a single point of invocation for each of them.
* Conveniently, they take paramters and return values
* Conveniently, they take parameters and return values
* consistent with the ABI calling convention.
*/


@ -39,7 +39,7 @@
* DJH 10/14/09 Version 1.3 added special loop for aligned case, was
* overreading bloated codesize back up to 892
* DJH 4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
* occuring if only 1 left outstanding, fixes bug
* occurring if only 1 left outstanding, fixes bug
* # 3888, corrected for all alignments. Peeled off
* 1 32byte chunk from kernel loop and extended 8byte
* loop at end to solve all combinations and prevent


@ -131,7 +131,7 @@ struct ioc3 {
#define SSCR_PAUSE_STATE 0x40000000 /* set when PAUSE takes effect*/
#define SSCR_RESET 0x80000000 /* reset DMA channels */
/* all producer/comsumer pointers are the same bitfield */
/* all producer/consumer pointers are the same bitfield */
#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
#define PROD_CONS_PTR_OFF 3


@ -1385,7 +1385,7 @@ typedef union ii_ibcr_u {
* respones are captured until IXSS[VALID] is cleared by setting the *
* appropriate bit in IECLR. Every time a spurious read response is *
* detected, the SPUR_RD bit of the PRB corresponding to the incoming *
* message's SIDN field is set. This always happens, regarless of *
* message's SIDN field is set. This always happens, regardless of *
* whether a header is captured. The programmer should check *
* IXSM[SIDN] to determine which widget sent the spurious response, *
* because there may be more than one SPUR_RD bit set in the PRB *
@ -2997,7 +2997,7 @@ typedef union ii_ippr_u {
/*
* Values for field imsgtype
*/
#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */
#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming message from Xtalk */
#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */
#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */


@ -964,7 +964,7 @@ efi_uart_console_only(void)
/*
* Look for the first granule aligned memory descriptor memory
* that is big enough to hold EFI memory map. Make sure this
* descriptor is atleast granule sized so it does not get trimmed
* descriptor is at least granule sized so it does not get trimmed
*/
struct kern_memdesc *
find_memmap_space (void)


@ -11,7 +11,7 @@
*
* For more details on the theory behind these algorithms, see "IA-64
* and Elementary Functions" by Peter Markstein; HP Professional Books
* (http://www.hp.com/go/retailbooks/)
* (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions)
*/
#include <asm/asmmacro.h>


@ -11,7 +11,7 @@
*
* For more details on the theory behind these algorithms, see "IA-64
* and Elementary Functions" by Peter Markstein; HP Professional Books
* (http://www.hp.com/go/retailbooks/)
* (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions)
*/
#include <asm/asmmacro.h>


@ -1165,7 +1165,7 @@
#define TXSTATUS_IPTOGGLE_BIT 0x80000000 /* Prev PToggle of TXPRIVEXT */
#define TXSTATUS_ISTATE_BIT 0x40000000 /* IState bit */
#define TXSTATUS_IWAIT_BIT 0x20000000 /* wait indefinitely in decision step*/
#define TXSTATUS_IEXCEPT_BIT 0x10000000 /* Indicate an exception occured */
#define TXSTATUS_IEXCEPT_BIT 0x10000000 /* Indicate an exception occurred */
#define TXSTATUS_IRPCOUNT_BITS 0x0E000000 /* Number of 'dirty' date entries*/
#define TXSTATUS_IRPCOUNT_S 25
#define TXSTATUS_IRQSTAT_BITS 0x0000F000 /* IRQEnc bits, trigger or interrupts */


@ -668,7 +668,7 @@ typedef union _tbires_tag_ {
State.Sig.TrigMask will indicate the bits set within TXMASKI at
the time of the handler call that have all been cleared to prevent
nested interrupt occuring immediately.
nested interrupt occurring immediately.
State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger
occurs at background level and TBICTX_CRIT_BIT and optionally
@ -1083,7 +1083,7 @@ TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask );
/* This routine causes the TBICTX structure specified in State.Sig.pCtx to
be restored. This implies that execution will not return to the caller.
The State.Sig.TrigMask field will be restored during the context switch
such that any immediately occuring interrupts occur in the context of the
such that any immediately occurring interrupts occur in the context of the
newly specified task. The State.Sig.SaveMask parameter is ignored. */
void __TBIASyncResume( TBIRES State );
@ -1305,7 +1305,7 @@ extern const char __TBISigNames[];
/*
* Calculate linear PC value from real PC and Minim mode control, the LSB of
* the result returned indicates if address compression has occured.
* the result returned indicates if address compression has occurred.
*/
#ifndef __ASSEMBLY__
#define METAG_LINPC( PCVal ) (\


@ -15,7 +15,7 @@
#include <asm/tbx.h>
/* BEGIN HACK */
/* define these for now while doing inital conversion to GAS
/* define these for now while doing initial conversion to GAS
will fix properly later */
/* Signal identifiers always have the TBID_SIGNAL_BIT set and contain the


@ -56,7 +56,7 @@ ___TBIJumpX:
/*
* TBIRES __TBISwitch( TBIRES Switch, PTBICTX *rpSaveCtx )
*
* Software syncronous context switch between soft threads, save only the
* Software synchronous context switch between soft threads, save only the
* registers which are actually valid on call entry.
*
* A0FrP, D0RtP, D0.5, D0.6, D0.7 - Saved on stack
@ -76,7 +76,7 @@ $LSwitchStart:
SETL [A0StP+#8++],D0FrT,D1RtP
/*
* Save current frame state - we save all regs because we don't want
* uninitialised crap in the TBICTX structure that the asyncronous resumption
* uninitialised crap in the TBICTX structure that the asynchronous resumption
* of a thread will restore.
*/
MOVT D1Re0,#HI($LSwitchExit) /* ASync resume point here */
@ -117,7 +117,7 @@ $LSwitchExit:
* This routine causes the TBICTX structure specified in State.Sig.pCtx to
* be restored. This implies that execution will not return to the caller.
* The State.Sig.TrigMask field will be ored into TXMASKI during the
* context switch such that any immediately occuring interrupts occur in
* context switch such that any immediately occurring interrupts occur in
* the context of the newly specified task. The State.Sig.SaveMask parameter
* is ignored.
*/


@ -284,7 +284,7 @@ void machine_check_print_event_info(struct machine_check_event *evt)
printk("%s Effective address: %016llx\n",
level, evt->u.ue_error.effective_address);
if (evt->u.ue_error.physical_address_provided)
printk("%s Physial address: %016llx\n",
printk("%s Physical address: %016llx\n",
level, evt->u.ue_error.physical_address);
break;
case MCE_ERROR_TYPE_SLB:


@ -422,7 +422,7 @@ static void show_saved_mc(void)
data_size = get_datasize(mc_saved_header);
date = mc_saved_header->date;
pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, toal size=0x%x, date = %04x-%02x-%02x\n",
pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
i, sig, pf, rev, total_size,
date & 0xffff,
date >> 24,


@ -254,7 +254,7 @@ int kvm_iommu_map_guest(struct kvm *kvm)
!iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
printk(KERN_WARNING "%s: No interrupt remapping support,"
" disallowing device assignment."
" Re-enble with \"allow_unsafe_assigned_interrupts=1\""
" Re-enable with \"allow_unsafe_assigned_interrupts=1\""
" module option.\n", __func__);
iommu_domain_free(kvm->arch.iommu_domain);
kvm->arch.iommu_domain = NULL;


@ -430,7 +430,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
}
/* Check that sizeof_partition_entry has the correct value */
if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
pr_debug("GUID Partitition Entry Size check failed.\n");
pr_debug("GUID Partition Entry Size check failed.\n");
goto fail;
}
@ -443,7 +443,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
le32_to_cpu((*gpt)->sizeof_partition_entry));
if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
pr_debug("GUID Partitition Entry Array CRC check failed.\n");
pr_debug("GUID Partition Entry Array CRC check failed.\n");
goto fail_ptes;
}


@ -623,7 +623,7 @@ static unsigned int tegra20_get_pll_ref_div(void)
case OSC_CTRL_PLL_REF_DIV_4:
return 4;
default:
pr_err("Invalied pll ref divider %d\n", pll_ref_div);
pr_err("Invalid pll ref divider %d\n", pll_ref_div);
BUG();
}
return 0;


@ -640,7 +640,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
input_addr =
dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
(unsigned long)sys_addr, (unsigned long)input_addr);
return input_addr;


@ -796,9 +796,9 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
debug.state[0] == debug.state[1]) {
seq_puts(m, "seems to be stuck\n");
} else if (debug.address[0] == debug.address[1]) {
seq_puts(m, "adress is constant\n");
seq_puts(m, "address is constant\n");
} else {
seq_puts(m, "is runing\n");
seq_puts(m, "is running\n");
}
seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);


@ -367,7 +367,7 @@ static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
*/
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
printk(KERN_ERR MOD "ARP failure duing connect\n");
printk(KERN_ERR MOD "ARP failure during connect\n");
kfree_skb(skb);
}


@ -1601,7 +1601,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
else if (ret == -ENXIO)
pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
else if (ret)
pr_err("Invalid argumant. Fail to register network rule.\n");
pr_err("Invalid argument. Fail to register network rule.\n");
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return ret;


@ -1499,7 +1499,7 @@ static int cyttsp4_core_sleep_(struct cyttsp4 *cd)
if (IS_BOOTLOADER(mode[0], mode[1])) {
mutex_unlock(&cd->system_lock);
dev_err(cd->dev, "%s: Device in BOOTLADER mode.\n", __func__);
dev_err(cd->dev, "%s: Device in BOOTLOADER mode.\n", __func__);
rc = -EINVAL;
goto error;
}


@ -1220,7 +1220,7 @@ static int msb_read_boot_blocks(struct msb_data *msb)
}
if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
dbg("managment flag doesn't indicate boot block %d",
dbg("management flag doesn't indicate boot block %d",
pba);
continue;
}
@ -1367,7 +1367,7 @@ static int msb_ftl_initialize(struct msb_data *msb)
static int msb_ftl_scan(struct msb_data *msb)
{
u16 pba, lba, other_block;
u8 overwrite_flag, managment_flag, other_overwrite_flag;
u8 overwrite_flag, management_flag, other_overwrite_flag;
int error;
struct ms_extra_data_register extra;
u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
@ -1409,7 +1409,7 @@ static int msb_ftl_scan(struct msb_data *msb)
}
lba = be16_to_cpu(extra.logical_address);
managment_flag = extra.management_flag;
management_flag = extra.management_flag;
overwrite_flag = extra.overwrite_flag;
overwrite_flags[pba] = overwrite_flag;
@ -1421,16 +1421,16 @@ static int msb_ftl_scan(struct msb_data *msb)
}
/* Skip system/drm blocks */
if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
MEMSTICK_MANAGMENT_FLAG_NORMAL) {
dbg("pba %05d -> [reserved managment flag %02x]",
pba, managment_flag);
if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
dbg("pba %05d -> [reserved management flag %02x]",
pba, management_flag);
msb_mark_block_used(msb, pba);
continue;
}
/* Erase temporary tables */
if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
dbg("pba %05d -> [temp table] - will erase", pba);
msb_mark_block_used(msb, pba);


@ -47,7 +47,7 @@
#define MEMSTICK_OV_PG_NORMAL \
(MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
#define MEMSTICK_MANAGEMENT_FLAG_NORMAL \
(MEMSTICK_MANAGEMENT_SYSFLG | \
MEMSTICK_MANAGEMENT_SCMS1 | \
MEMSTICK_MANAGEMENT_SCMS0) \


@ -2563,7 +2563,7 @@ static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
if (user_trig_timer & ~0xFF) {
dev_err(dev,
"debugfs error input: should be beetween 0 to 255\n");
"debugfs error input: should be between 0 to 255\n");
return -EINVAL;
}


@ -386,7 +386,7 @@ static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
sm_printk("sector %d of block at LBA %d of zone %d"
" coudn't be read, marking it as invalid",
" couldn't be read, marking it as invalid",
boffset / SM_SECTOR_SIZE, lba, zone);
oob.data_status = 0;


@ -1515,7 +1515,7 @@ static int bgmac_mii_register(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
bgmac_err(bgmac, "PHY connecton failed\n");
bgmac_err(bgmac, "PHY connection failed\n");
err = PTR_ERR(phy_dev);
goto err_unregister_bus;
}


@ -2772,7 +2772,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n",
dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
}


@ -176,11 +176,11 @@ static const char *hsynd_str(u8 synd)
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
case MLX5_HEALTH_SYNDR_EQ_INV:
return "Invalid EQ refrenced";
return "Invalid EQ referenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
case MLX5_HEALTH_SYNDR_HIGH_TEMP:
return "High temprature";
return "High temperature";
default:
return "unrecognized error";
}


@ -140,7 +140,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs);
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
return -EPERM;


@ -2315,14 +2315,14 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"PCI configration 0x34 = 0x%2x\n", cap_pointer);
"PCI configuration 0x34 = 0x%2x\n", cap_pointer);
do {
pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
cap_id = cap_hdr & 0xFF;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"in pci configration, cap_pointer%x = %x\n",
"in pci configuration, cap_pointer%x = %x\n",
cap_pointer, cap_id);
if (cap_id == 0x01) {


@ -5798,7 +5798,7 @@ static void atlas7_gpio_handle_irq(struct irq_desc *desc)
status = readl(ATLAS7_GPIO_INT_STATUS(bank));
if (!status) {
pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n",
pr_warn("%s: gpio [%s] status %#x no interrupt is flagged\n",
__func__, gc->label, status);
handle_bad_irq(desc);
return;


@ -80,13 +80,13 @@ static const struct attribute *hwmon_cputemp2[] = {
static ssize_t cpu0_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "CPU 0 Temprature\n");
return sprintf(buf, "CPU 0 Temperature\n");
}
static ssize_t cpu1_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "CPU 1 Temprature\n");
return sprintf(buf, "CPU 1 Temperature\n");
}
static ssize_t get_cpu0_temp(struct device *dev,
@ -169,7 +169,7 @@ static int __init loongson_hwmon_init(void)
ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
if (ret) {
pr_err("fail to create cpu temprature interface!\n");
pr_err("fail to create cpu temperature interface!\n");
goto fail_create_sysfs_cputemp_files;
}


@ -477,7 +477,7 @@ static int asd_init_chip(struct asd_ha_struct *asd_ha)
err = asd_start_seqs(asd_ha);
if (err) {
asd_printk("coudln't start seqs for %s\n",
asd_printk("couldn't start seqs for %s\n",
pci_name(asd_ha->pcidev));
goto out;
}


@ -1352,7 +1352,7 @@ int asd_start_seqs(struct asd_ha_struct *asd_ha)
for_each_sequencer(lseq_mask, lseq_mask, lseq) {
err = asd_seq_start_lseq(asd_ha, lseq);
if (err) {
asd_printk("coudln't start LSEQ %d for %s\n", lseq,
asd_printk("couldn't start LSEQ %d for %s\n", lseq,
pci_name(asd_ha->pcidev));
return err;
}


@ -794,7 +794,7 @@ static void port_timeout(unsigned long data)
* case stay in the stopped state.
*/
dev_err(sciport_to_dev(iport),
"%s: SCIC Port 0x%p failed to stop before tiemout.\n",
"%s: SCIC Port 0x%p failed to stop before timeout.\n",
__func__,
iport);
} else if (current_state == SCI_PORT_STOPPING) {


@ -3222,7 +3222,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
ql_log(ql_log_warn, vha, 0x00d5,
"Unable to allocate memory for fcp priorty data (%x).\n",
"Unable to allocate memory for fcp priority data (%x).\n",
FCP_PRIO_CFG_SIZE);
return QLA_FUNCTION_FAILED;
}


@ -293,7 +293,7 @@ static int chap_server_compute_md5(
pr_debug("[server] MD5 Digests do not match!\n\n");
goto out;
} else
pr_debug("[server] MD5 Digests match, CHAP connetication"
pr_debug("[server] MD5 Digests match, CHAP connection"
" successful.\n\n");
/*
* One way authentication has succeeded, return now if mutual


@ -62,7 +62,7 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx);
/*
* Request and set direction of modem control lines GPIOs and sets up irq
* Request and set direction of modem control line GPIOs and set up irq
* handling.
* devm_* functions are used, so there's no need to call mctrl_gpio_free().
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
@ -71,7 +71,7 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx);
/*
* Request and set direction of modem control lines GPIOs.
* Request and set direction of modem control line GPIOs.
* devm_* functions are used, so there's no need to call mctrl_gpio_free().
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
* allocation error.


@ -352,7 +352,7 @@ int w1_reset_bus(struct w1_master *dev)
w1_delay(70);
result = dev->bus_master->read_bit(dev->bus_master->data) & 0x1;
/* minmum 70 (above) + 430 = 500 us
/* minimum 70 (above) + 430 = 500 us
* There aren't any timing requirements between a reset and
* the following transactions. Sleeping is safe here.
*/


@ -262,7 +262,7 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
ret = register_restart_handler(&wdd->restart_nb);
if (ret)
pr_warn("watchog%d: Cannot register restart handler (%d)\n",
pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
wdd->id, ret);
}


@ -122,7 +122,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
if (match_int(&args[0], &option))
return -EINVAL;
if (option <= 0) {
EXOFS_ERR("Timout must be > 0");
EXOFS_ERR("Timeout must be > 0");
return -EINVAL;
}
opts->timeout = option * HZ;


@ -303,7 +303,7 @@ int jbd2_journal_recover(journal_t *journal)
* Locate any valid recovery information from the journal and set up the
* journal structures in memory to ignore it (presumably because the
* caller has evidence that it is out of date).
* This function does'nt appear to be exorted..
* This function doesn't appear to be exported..
*
* We perform one pass over the journal to allow us to tell the user how
* much recovery information is being erased, and to let us initialise


@ -543,7 +543,7 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved);
*
* Some transactions, such as large extends and truncates, can be done
* atomically all at once or in several stages. The operation requests
* a credit for a number of buffer modications in advance, but can
* a credit for a number of buffer modifications in advance, but can
* extend its credit if it needs more.
*
* jbd2_journal_extend tries to give the running handle more buffer credits.
@ -627,7 +627,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
* If the jbd2_journal_extend() call above fails to grant new buffer credits
* to a running handle, a call to jbd2_journal_restart will commit the
* handle's transaction so far and reattach the handle to a new
* transaction capabable of guaranteeing the requested number of
* transaction capable of guaranteeing the requested number of
* credits. We preserve reserved handle if there's any attached to the
* passed in handle.
*/
@ -1586,7 +1586,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
/**
* int jbd2_journal_stop() - complete a transaction
* @handle: tranaction to complete.
* @handle: transaction to complete.
*
* All done for a particular handle.
*


@ -285,7 +285,7 @@ static void put_super(struct super_block *sb)
* deactivate_locked_super - drop an active reference to superblock
* @s: superblock to deactivate
*
* Drops an active reference to superblock, converting it into a temprory
* Drops an active reference to superblock, converting it into a temporary
* one if there is no other active references left. In that case we
* tell fs driver to shut it down and drop the temporary reference we
* had just acquired.


@ -389,7 +389,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
sock->state = state;
sock_init_data(sock, sk);
if (tipc_sk_insert(tsk)) {
pr_warn("Socket create failed; port numbrer exhausted\n");
pr_warn("Socket create failed; port number exhausted\n");
return -EINVAL;
}
msg_set_origport(msg, tsk->portid);


@ -73,7 +73,7 @@ int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
}
/*
* Here we need to explicitely preallocate the counts, as if
* Here we need to explicitly preallocate the counts, as if
* we use the auto allocation it will allocate just for 1 cpu,
* as we start by cpu 0.
*/


@ -827,7 +827,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
perf_evsel__set_sample_bit(evsel, PERIOD);
/*
* When the user explicitely disabled time don't force it here.
* When the user explicitly disabled time don't force it here.
*/
if (opts->sample_time &&
(!perf_missing_features.sample_id_all &&


@ -362,7 +362,7 @@ static void machine__update_thread_pid(struct machine *machine,
}
/*
* Caller must eventually drop thread->refcnt returned with a successfull
* Caller must eventually drop thread->refcnt returned with a successful
* lookup/new thread inserted.
*/
static struct thread *____machine__findnew_thread(struct machine *machine,


@ -1649,7 +1649,7 @@ static void parse_events_print_error(struct parse_events_error *err,
buf = _buf;
/* We're cutting from the beggining. */
/* We're cutting from the beginning. */
if (err->idx > max_err_idx)
cut = err->idx - max_err_idx;


@ -557,7 +557,7 @@ static u8 revbyte(u8 b)
/*
* XXX this is hack in attempt to carry flags bitfield
* throught endian village. ABI says:
* through endian village. ABI says:
*
* Bit-fields are allocated from right to left (least to most significant)
* on little-endian implementations and from left to right (most to least