Fix common misspellings
Fixes generated by 'codespell' and manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
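For reference, the workflow the message describes might look like the sketch below. codespell's read-only mode and its -w/--write-changes flag are real, but the exact version and invocation used for this commit are an assumption:

    # Dry run: list suspected misspellings without touching the tree.
    codespell arch/powerpc

    # Apply the suggested fixes in place (-w / --write-changes), then
    # review them by hand -- identifiers such as retreive_nvram_error_log()
    # must keep their spelling even when a nearby comment is fixed.
    codespell -w arch/powerpc
    git diff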
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
 
 /* This function can be used to enable the early boot text when doing
  * OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
  */
 void __init btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address)
@@ -379,7 +379,7 @@ interrupt_end_book3e:
 	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 	b	system_call_common
 
-/* Auxillary Processor Unavailable Interrupt */
+/* Auxiliary Processor Unavailable Interrupt */
 	START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
@@ -5,7 +5,7 @@
  * handling and other fixed offset specific things.
  *
  * This file is meant to be #included from head_64.S due to
- * position dependant assembly.
+ * position dependent assembly.
  *
  * Most of this originates from head_64.S and thus has the same
  * copyright history.
@@ -766,7 +766,7 @@ DataAccess:
  * miss get to this point to load the TLB.
  * r10 - TLB_TAG value
  * r11 - Linux PTE
- * r12, r9 - avilable to use
+ * r12, r9 - available to use
  * PID - loaded with proper value when we get here
  * Upon exit, we reload everything and RFI.
  * Actually, it will fit now, but oh well.....a common place
@@ -178,7 +178,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
@@ -40,7 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 
-/* The physical memory is layed out such that the secondary processor
+/* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
  * using the layout described in exceptions-64s.S
  */
@@ -326,7 +326,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/**** Might be a good idea to set L2DO here - to prevent instructions
	      from getting into the cache. But since we invalidate
	      the next time we enable the cache it doesn't really matter.
-	      Don't do this unless you accomodate all processor variations.
+	      Don't do this unless you accommodate all processor variations.
	      The bit moved on the 7450.....
	 ****/
 
@@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m)
 	seq_printf(m, "system_active_processors=%d\n",
 		   ppp_data.active_system_procs);
 
-	/* pool related entries are apropriate for shared configs */
+	/* pool related entries are appropriate for shared configs */
 	if (lppaca_of(0).shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
 
@@ -759,7 +759,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
 	 * at commit time(->commit_txn) as a whole
 	 */
 	if (cpuhw->group_flag & PERF_EVENT_TXN)
@@ -15,7 +15,7 @@
 
 /*
  * Grab the register values as they are now.
- * This won't do a particularily good job because we really
+ * This won't do a particularly good job because we really
  * want our caller's caller's registers, and our caller has
 * already executed its prologue.
 * ToDo: We could reach back into the caller's save area to do
@@ -683,7 +683,7 @@ void __init early_init_devtree(void *params)
 #endif
 
 #ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occured during last boot */
+	/* scan tree to see if dump occurred during last boot */
 	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
 #endif
 
@@ -739,7 +739,7 @@ void __init early_init_devtree(void *params)
 
 	DBG("Scanning CPUs ...\n");
 
-	/* Retreive CPU related informations from the flat tree
+	/* Retrieve CPU related informations from the flat tree
 	 * (altivec support, boot CPU ID, ...)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
@@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 #ifdef CONFIG_VSX
 	/*
 	 * Currently to set and and get all the vsx state, you need to call
-	 * the fp and VMX calls aswell. This only get/sets the lower 32
+	 * the fp and VMX calls as well. This only get/sets the lower 32
 	 * 128bit VSX registers.
 	 */
 
@@ -465,7 +465,7 @@ static void start_event_scan(void)
 	pr_debug("rtasd: will sleep for %d milliseconds\n",
 		 (30000 / rtas_event_scan_rate));
 
-	/* Retreive errors from nvram if any */
+	/* Retrieve errors from nvram if any */
 	retreive_nvram_error_log();
 
 	schedule_delayed_work_on(cpumask_first(cpu_online_mask),
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 	/* Disable MSR:DR to make sure we don't take a TLB or
 	 * hash miss during the copy, as our hash table will
-	 * for a while be unuseable. For .text, we assume we are
+	 * for a while be unusable. For .text, we assume we are
 	 * covered by a BAT. This works only for non-G5 at this
 	 * point. G5 will need a better approach, possibly using
 	 * a small temporary hash table filled with large mappings,
@@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		 * ESR_DST (!?) or 0. In the process of chasing this with the
 		 * hardware people - not sure if it can happen on any illegal
 		 * instruction or only on FP instructions, whether there is a
-		 * pattern to occurences etc. -dgibson 31/Mar/2003 */
+		 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
 		switch (do_mathemu(regs)) {
 		case 0:
 			emulate_single_step(regs);
@@ -1,5 +1,5 @@
 /*
- * udbg for NS16550 compatable serial ports
+ * udbg for NS16550 compatible serial ports
 *
 * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
 *
@@ -19,7 +19,7 @@
 
 /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction. Since we don't have a call here, we artifically
+   call instruction. Since we don't have a call here, we artificially
    extend the range covered by the unwind info by adding a nop before
    the real start. */
 	nop
@@ -20,7 +20,7 @@
 
 /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction. Since we don't have a call here, we artifically
+   call instruction. Since we don't have a call here, we artificially
    extend the range covered by the unwind info by padding before the
    real start. */
 	nop