Blackfin arch: Update adeos blackfin arch patch to 1.9-00

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Philippe Gerum authored on 2009-03-04 16:52:38 +08:00; committed by Bryan Wu
parent 97d4b35fb4
commit 9bd50df6aa
10 changed files with 243 additions and 305 deletions
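
Note: most of the mechanical churn in the diff below comes from dropping open-coded irq_desc pointer arithmetic in favor of the irq_to_desc() accessor, which stays valid when descriptors are not laid out as one flat array (sparse IRQ support). A minimal sketch of the two lookup styles, for orientation only — the helper names here are made up and not part of the patch:

	#include <linux/irq.h>	/* struct irq_desc, irq_to_desc() */

	/* Old style: assumes irq_desc[] is a contiguous array. */
	static struct irq_desc *desc_by_arith(unsigned int irq)
	{
		return irq_desc + irq;
	}

	/* New style: goes through the accessor, which also works
	 * when descriptors are allocated sparsely. */
	static struct irq_desc *desc_by_accessor(unsigned int irq)
	{
		return irq_to_desc(irq);
	}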


@@ -35,14 +35,8 @@
 #include <asm/atomic.h>
 #include <asm/io.h>
 
-static int create_irq_threads;
-
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
-static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
-
-static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
-
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 static void __ipipe_no_irqtail(void);
@@ -93,6 +87,7 @@ void __ipipe_enable_pipeline(void)
  */
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 {
+	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
 	struct ipipe_domain *this_domain, *next_domain;
 	struct list_head *head, *pos;
 	int m_ack, s = -1;
@@ -104,7 +99,6 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * interrupt.
 	 */
 	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-
 	this_domain = ipipe_current_domain;
 
 	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
@@ -114,49 +108,28 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 		next_domain = list_entry(head, struct ipipe_domain, p_link);
 		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
 			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
-				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
-			if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-				s = __test_and_set_bit(IPIPE_STALL_FLAG,
-						       &ipipe_root_cpudom_var(status));
+				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
 			__ipipe_dispatch_wired(next_domain, irq);
-			goto finalize;
-			return;
+			goto out;
 		}
 	}
 
 	/* Ack the interrupt. */
 
 	pos = head;
-
 	while (pos != &__ipipe_pipeline) {
 		next_domain = list_entry(pos, struct ipipe_domain, p_link);
-		/*
-		 * For each domain handling the incoming IRQ, mark it
-		 * as pending in its log.
-		 */
 		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
-			/*
-			 * Domains that handle this IRQ are polled for
-			 * acknowledging it by decreasing priority
-			 * order. The interrupt must be made pending
-			 * _first_ in the domain's status flags before
-			 * the PIC is unlocked.
-			 */
 			__ipipe_set_irq_pending(next_domain, irq);
-
 			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
-				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
 				m_ack = 1;
 			}
 		}
-
-		/*
-		 * If the domain does not want the IRQ to be passed
-		 * down the interrupt pipe, exit the loop now.
-		 */
 		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
 			break;
-
 		pos = next_domain->p_link.next;
 	}
 
@@ -166,18 +139,24 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * immediately to the current domain if the interrupt has been
 	 * marked as 'sticky'. This search does not go beyond the
 	 * current domain in the pipeline. We also enforce the
-	 * additional root stage lock (blackfin-specific). */
+	 * additional root stage lock (blackfin-specific).
+	 */
+	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
 
-	if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-		s = __test_and_set_bit(IPIPE_STALL_FLAG,
-				       &ipipe_root_cpudom_var(status));
-finalize:
+	/*
+	 * If the interrupt preempted the head domain, then do not
+	 * even try to walk the pipeline, unless an interrupt is
+	 * pending for it.
+	 */
+	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
+	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+		goto out;
 
 	__ipipe_walk_pipeline(head);
-
+out:
 	if (!s)
-		__clear_bit(IPIPE_STALL_FLAG,
-			    &ipipe_root_cpudom_var(status));
+		__clear_bit(IPIPE_STALL_FLAG, &p->status);
 }
 
 int __ipipe_check_root(void)
@@ -187,7 +166,7 @@ int __ipipe_check_root(void)
 
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int prio = desc->ic_prio;
 
 	desc->depth = 0;
@@ -199,7 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
 
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int prio = desc->ic_prio;
 
 	if (ipd != &ipipe_root &&
@@ -236,15 +215,18 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	unsigned long flags;
 
-	/* We need to run the IRQ tail hook whenever we don't
+	/*
+	 * We need to run the IRQ tail hook whenever we don't
 	 * propagate a syscall to higher domains, because we know that
 	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling). */
+	 * deferred rescheduling).
+	 */
 
-	if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+	if (regs->orig_p0 < NR_syscalls) {
 		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
 		hook();
-		return 0;
+		if ((current->flags & PF_EVNOTIFY) == 0)
+			return 0;
 	}
 
 	/*
@@ -312,112 +294,46 @@ int ipipe_trigger_irq(unsigned irq)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_IPIPE_DEBUG
 	if (irq >= IPIPE_NR_IRQS ||
 	    (ipipe_virtual_irq_p(irq)
 	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
 		return -EINVAL;
+#endif
 
 	local_irq_save_hw(flags);
 	__ipipe_handle_irq(irq, NULL);
 	local_irq_restore_hw(flags);
 
 	return 1;
 }
 
-/* Move Linux IRQ to threads. */
-
-static int do_irqd(void *__desc)
+asmlinkage void __ipipe_sync_root(void)
 {
-	struct irq_desc *desc = __desc;
-	unsigned irq = desc - irq_desc;
-	int thrprio = desc->thr_prio;
-	int thrmask = 1 << thrprio;
-	int cpu = smp_processor_id();
-	cpumask_t cpumask;
+	unsigned long flags;
 
-	sigfillset(&current->blocked);
-	current->flags |= PF_NOFREEZE;
-	cpumask = cpumask_of_cpu(cpu);
-	set_cpus_allowed(current, cpumask);
-	ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
+	BUG_ON(irqs_disabled());
 
-	while (!kthread_should_stop()) {
-		local_irq_disable();
-		if (!(desc->status & IRQ_SCHEDULED)) {
-			set_current_state(TASK_INTERRUPTIBLE);
-resched:
-			local_irq_enable();
-			schedule();
-			local_irq_disable();
-		}
-		__set_current_state(TASK_RUNNING);
-		/*
-		 * If higher priority interrupt servers are ready to
-		 * run, reschedule immediately. We need this for the
-		 * GPIO demux IRQ handler to unmask the interrupt line
-		 * _last_, after all GPIO IRQs have run.
-		 */
-		if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
-			goto resched;
-		if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
-			per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
-		desc->status &= ~IRQ_SCHEDULED;
-		desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
-		local_irq_enable();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	local_irq_save_hw(flags);
+
+	clear_thread_flag(TIF_IRQ_SYNC);
+
+	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
+		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+
+	local_irq_restore_hw(flags);
 }
 
-static void kick_irqd(unsigned irq, void *cookie)
+void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	int thrprio = desc->thr_prio;
-	int thrmask = 1 << thrprio;
-	int cpu = smp_processor_id();
+	struct ipipe_domain *ipd = ipipe_current_domain;
 
-	if (!(desc->status & IRQ_SCHEDULED)) {
-		desc->status |= IRQ_SCHEDULED;
-		per_cpu(pending_irqthread_mask, cpu) |= thrmask;
-		++per_cpu(pending_irq_count[thrprio], cpu);
-		wake_up_process(desc->thread);
-	}
-}
-
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
-{
-	if (desc->thread || !create_irq_threads)
-		return 0;
-
-	desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
-	if (desc->thread == NULL) {
-		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
-		return -ENOMEM;
+	if (ipd == ipipe_root_domain) {
+		if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+			return;
 	}
 
-	wake_up_process(desc->thread);
-
-	desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
-	ipipe_root_domain->irqs[irq].handler = &kick_irqd;
-
-	return 0;
-}
-
-void __init ipipe_init_irq_threads(void)
-{
-	unsigned irq;
-	struct irq_desc *desc;
-
-	create_irq_threads = 1;
-
-	for (irq = 0; irq < NR_IRQS; irq++) {
-		desc = irq_desc + irq;
-		if (desc->action != NULL ||
-			(desc->status & IRQ_NOREQUEST) != 0)
-			ipipe_start_irq_thread(irq, desc);
-	}
+	__ipipe_sync_stage(syncmask);
 }
 
 EXPORT_SYMBOL(show_stack);
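
The other substantive change above replaces the old ROOTLOCK/IRQ-thread machinery with a deferred-synchronization scheme: while IPIPE_SYNCDEFER_FLAG stalls the root stage, __ipipe_handle_irq() only logs incoming IRQs, and __ipipe_sync_root() later clears TIF_IRQ_SYNC and replays whatever is pending. The following is a self-contained model of that log-then-replay pattern with simplified stand-in types — not the kernel's API:

	#include <stdbool.h>

	struct stage {
		unsigned long pending;	/* one bit per logged IRQ */
		bool defer_sync;	/* stands in for IPIPE_SYNCDEFER_FLAG */
	};

	/* Logging is always safe: just record the IRQ for later. */
	static void log_irq(struct stage *s, int irq)
	{
		s->pending |= 1UL << irq;
	}

	/* Replay logged IRQs unless synchronization is deferred;
	 * this models the early return in ___ipipe_sync_pipeline().
	 * Uses the GCC/Clang __builtin_ctzl() bit-scan. */
	static void sync_stage(struct stage *s, void (*handler)(int))
	{
		while (!s->defer_sync && s->pending) {
			int irq = __builtin_ctzl(s->pending);

			s->pending &= ~(1UL << irq);
			handler(irq);
		}
	}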