genirq: Fix various typos in comments

Go over the IRQ subsystem source code (including irqchip drivers) and
fix common typos in comments.

No change in functionality intended.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Author: Ingo Molnar
Date:   2018-12-03 11:44:51 +01:00
parent: 989a4222c1
commit: c5f48c0a7a

9 changed files with 15 additions and 15 deletions

kernel/irq/chip.c

@@ -929,7 +929,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 break;
 /*
  * Bail out if the outer chip is not set up
- * and the interrrupt supposed to be started
+ * and the interrupt supposed to be started
  * right away.
  */
 if (WARN_ON(is_chained))
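
For context, a minimal sketch of where this path is hit: installing a
chained flow handler starts the interrupt right away, so the parent's
irq_chip must already be set up. The demo_* names are hypothetical.

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

/* Chained flow handler: demultiplexes child interrupts in the
 * context of the parent interrupt. */
static void demo_gpio_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* ... read controller status, generic_handle_irq() per child ... */
	chained_irq_exit(chip, desc);
}

static void demo_setup(unsigned int parent_irq)
{
	/* __irq_do_set_handler() warns and bails (the check above) if
	 * parent_irq still has no_irq_chip installed at this point. */
	irq_set_chained_handler(parent_irq, demo_gpio_irq_handler);
}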

kernel/irq/ipi.c

@@ -56,7 +56,7 @@ int irq_reserve_ipi(struct irq_domain *domain,
 unsigned int next;
 /*
- * The IPI requires a seperate HW irq on each CPU. We require
+ * The IPI requires a separate HW irq on each CPU. We require
  * that the destination mask is consecutive. If an
  * implementation needs to support holes, it can reserve
  * several IPI ranges.
@@ -172,7 +172,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 /*
  * Get the real hardware irq number if the underlying implementation
- * uses a seperate irq per cpu. If the underlying implementation uses
+ * uses a separate irq per cpu. If the underlying implementation uses
  * a single hardware irq for all cpus then the IPI send mechanism
  * needs to take care of the cpu destinations.
  */

kernel/irq/manage.c

@@ -915,7 +915,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 /*
- * Interrupts which are not explicitely requested as threaded
+ * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
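
For contrast with the force-threaded case this comment covers, a
handler that is explicitly requested as threaded looks roughly like
the sketch below; the demo_* names are illustrative.

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* Hard irq context: quick check, defer the real work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	/* Explicitly threaded: no implicit bh/preempt disable here,
	 * unlike the force-threaded wrapper the comment describes. */
	return IRQ_HANDLED;
}

static int demo_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
				    IRQF_ONESHOT, "demo-dev", dev);
}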

kernel/irq/spurious.c

@@ -66,7 +66,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
 raw_spin_lock(&desc->lock);
 /*
- * PER_CPU, nested thread interrupts and interrupts explicitely
+ * PER_CPU, nested thread interrupts and interrupts explicitly
  * marked polled are excluded from polling.
  */
 if (irq_settings_is_per_cpu(desc) ||
@@ -76,7 +76,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
 /*
  * Do not poll disabled interrupts unless the spurious
- * disabled poller asks explicitely.
+ * disabled poller asks explicitly.
  */
 if (irqd_irq_disabled(&desc->irq_data) && !force)
 	goto out;
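
A minimal sketch of one way a driver lands in the "explicitly marked
polled" exclusion above; demo_mark_polled is hypothetical.

#include <linux/irq.h>

static void demo_mark_polled(unsigned int irq)
{
	/* IRQ_IS_POLLED excludes the interrupt from spurious detection
	 * and from core-side polling in try_one_irq(). */
	irq_set_status_flags(irq, IRQ_IS_POLLED);
}
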
@@ -292,7 +292,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
  * So in case a thread is woken, we just note the fact and
  * defer the analysis to the next hardware interrupt.
  *
- * The threaded handlers store whether they sucessfully
+ * The threaded handlers store whether they successfully
  * handled an interrupt and we check whether that number
  * changed versus the last invocation.
  *