Merge branches 'amba', 'devel-stable', 'fixes', 'mach-types', 'mmci', 'pci' and 'versatile' into for-linus

Russell King
2012-05-21 15:15:10 +01:00
630 changed files with 5994 additions and 3849 deletions

arch/arm/kernel/Makefile

@@ -34,6 +34,7 @@ obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o

arch/arm/kernel/arch_timer.c (new file)

@@ -0,0 +1,350 @@
/*
* linux/arch/arm/kernel/arch_timer.c
*
* Copyright (C) 2011 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/system_info.h>
#include <asm/sched_clock.h>
static unsigned long arch_timer_rate;
static int arch_timer_ppi;
static int arch_timer_ppi2;
static struct clock_event_device __percpu **arch_timer_evt;
/*
* Architected system timer support.
*/
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
#define ARCH_TIMER_REG_CTRL 0
#define ARCH_TIMER_REG_FREQ 1
#define ARCH_TIMER_REG_TVAL 2
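/*
* These indices select the ARMv7 generic timer CP15 registers used by
* the accessors below: CTRL is CNTP_CTL (c14, c2, 1), TVAL is
* CNTP_TVAL (c14, c2, 0) and FREQ is CNTFRQ (c14, c0, 0).
*/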
static void arch_timer_reg_write(int reg, u32 val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
break;
}
isb();
}
static u32 arch_timer_reg_read(int reg)
{
u32 val;
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
break;
case ARCH_TIMER_REG_FREQ:
asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
break;
default:
BUG();
}
return val;
}
static irqreturn_t arch_timer_handler(int irq, void *dev_id)
{
struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void arch_timer_disable(void)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
}
static void arch_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
arch_timer_disable();
break;
default:
break;
}
}
static int arch_timer_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
/* Be safe... */
arch_timer_disable();
clk->features = CLOCK_EVT_FEAT_ONESHOT;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->set_mode = arch_timer_set_mode;
clk->set_next_event = arch_timer_set_next_event;
clk->irq = arch_timer_ppi;
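/*
* Min delta 0xf ticks, max delta 0x7fffffff ticks: TVAL is a signed
* 32-bit down-counter, hence the 0x7fffffff ceiling.
*/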
clockevents_config_and_register(clk, arch_timer_rate,
0xf, 0x7fffffff);
*__this_cpu_ptr(arch_timer_evt) = clk;
enable_percpu_irq(clk->irq, 0);
if (arch_timer_ppi2)
enable_percpu_irq(arch_timer_ppi2, 0);
return 0;
}
/* Is the optional system timer available? */
static int local_timer_is_architected(void)
{
return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
}
static int arch_timer_available(void)
{
unsigned long freq;
if (!local_timer_is_architected())
return -ENXIO;
if (arch_timer_rate == 0) {
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
/* Check the timer frequency. */
if (freq == 0) {
pr_warn("Architected timer frequency not available\n");
return -EINVAL;
}
arch_timer_rate = freq;
}
pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
return 0;
}
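/*
* CNTPCT is the physical counter and CNTVCT the virtual one; the MRRC
* encodings below transfer all 64 bits in a single atomic access, so
* no high-low-high retry loop is needed.
*/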
static inline cycle_t arch_counter_get_cntpct(void)
{
u32 cvall, cvalh;
asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
return ((cycle_t) cvalh << 32) | cvall;
}
static inline cycle_t arch_counter_get_cntvct(void)
{
u32 cvall, cvalh;
asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
return ((cycle_t) cvalh << 32) | cvall;
}
static u32 notrace arch_counter_get_cntvct32(void)
{
cycle_t cntvct = arch_counter_get_cntvct();
/*
* The sched_clock infrastructure only knows about counters
* with at most 32bits. Forget about the upper 24 bits for the
* time being...
*/
return (u32)(cntvct & (u32)~0);
}
static cycle_t arch_counter_read(struct clocksource *cs)
{
return arch_counter_get_cntpct();
}
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
disable_percpu_irq(clk->irq);
if (arch_timer_ppi2)
disable_percpu_irq(arch_timer_ppi2);
arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
static struct local_timer_ops arch_timer_ops __cpuinitdata = {
.setup = arch_timer_setup,
.stop = arch_timer_stop,
};
static struct clock_event_device arch_timer_global_evt;
static int __init arch_timer_register(void)
{
int err;
err = arch_timer_available();
if (err)
return err;
arch_timer_evt = alloc_percpu(struct clock_event_device *);
if (!arch_timer_evt)
return -ENOMEM;
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
"arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi, err);
goto out_free;
}
if (arch_timer_ppi2) {
err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
"arch_timer", arch_timer_evt);
if (err) {
pr_err("arch_timer: can't register interrupt %d (%d)\n",
arch_timer_ppi2, err);
arch_timer_ppi2 = 0;
goto out_free_irq;
}
}
err = local_timer_register(&arch_timer_ops);
if (err) {
/*
* We couldn't register as a local timer (could be
* because we're on a UP platform, or because some
* other local timer is already present...). Try as a
* global timer instead.
*/
arch_timer_global_evt.cpumask = cpumask_of(0);
err = arch_timer_setup(&arch_timer_global_evt);
}
if (err)
goto out_free_irq;
return 0;
out_free_irq:
free_percpu_irq(arch_timer_ppi, arch_timer_evt);
if (arch_timer_ppi2)
free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
out_free:
free_percpu(arch_timer_evt);
return err;
}
static const struct of_device_id arch_timer_of_match[] __initconst = {
{ .compatible = "arm,armv7-timer", },
{},
};
int __init arch_timer_of_register(void)
{
struct device_node *np;
u32 freq;
np = of_find_matching_node(NULL, arch_timer_of_match);
if (!np) {
pr_err("arch_timer: can't find DT node\n");
return -ENODEV;
}
/* Try to determine the frequency from the device tree or CNTFRQ */
if (!of_property_read_u32(np, "clock-frequency", &freq))
arch_timer_rate = freq;
arch_timer_ppi = irq_of_parse_and_map(np, 0);
arch_timer_ppi2 = irq_of_parse_and_map(np, 1);
pr_info("arch_timer: found %s irqs %d %d\n",
np->name, arch_timer_ppi, arch_timer_ppi2);
return arch_timer_register();
}
int __init arch_timer_sched_clock_init(void)
{
int err;
err = arch_timer_available();
if (err)
return err;
setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate);
return 0;
}
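The MRRC accessors above pull both halves of the 64-bit counter in one instruction. For contrast, a hypothetical sketch (helper and names invented, not part of this commit) of the high-low-high retry loop that paired 32-bit reads would otherwise need:

/*
 * Hypothetical helper, for illustration only: when a 64-bit counter
 * can only be read as two 32-bit halves, the classic high-low-high
 * loop guards against the low word wrapping between the reads.
 */
static u64 read_counter_split(u32 (*read_hi)(void), u32 (*read_lo)(void))
{
	u32 hi, lo;

	do {
		hi = read_hi();		/* sample the high word */
		lo = read_lo();		/* then the low word */
	} while (hi != read_hi());	/* retry if the high word moved */

	return ((u64)hi << 32) | lo;
}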

arch/arm/kernel/bios32.c

@@ -374,16 +374,29 @@ EXPORT_SYMBOL(pcibios_fixup_bus);
#endif
/*
-* Swizzle the device pin each time we cross a bridge.
-* This might update pin and returns the slot number.
+* Swizzle the device pin each time we cross a bridge. If a platform does
+* not provide a swizzle function, we perform the standard PCI swizzling.
+*
+* The default swizzling walks up the bus tree one level at a time, applying
+* the standard swizzle function at each step, stopping when it finds the PCI
+* root bus. This will return the slot number of the bridge device on the
+* root bus and the interrupt pin on that device which should correspond
+* with the downstream device interrupt.
+*
+* Platforms may override this, in which case the slot and pin returned
+* depend entirely on the platform code. However, please note that the
+* PCI standard swizzle is implemented on plug-in cards and Cardbus based
+* PCI extenders, so it can not be ignored.
*/
static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
{
struct pci_sys_data *sys = dev->sysdata;
-int slot = 0, oldpin = *pin;
+int slot, oldpin = *pin;
if (sys->swizzle)
slot = sys->swizzle(dev, pin);
+else
+slot = pci_common_swizzle(dev, pin);
if (debug_pci)
printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
@@ -410,7 +423,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
-static void __init pcibios_init_hw(struct hw_pci *hw)
+static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
struct pci_sys_data *sys = NULL;
int ret;
@@ -424,7 +437,6 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
#ifdef CONFIG_PCI_DOMAINS
sys->domain = hw->domain;
#endif
-sys->hw = hw;
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
sys->map_irq = hw->map_irq;
@@ -440,14 +452,18 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
&iomem_resource, sys->mem_offset);
}
-sys->bus = hw->scan(nr, sys);
+if (hw->scan)
+sys->bus = hw->scan(nr, sys);
+else
+sys->bus = pci_scan_root_bus(NULL, sys->busnr,
+hw->ops, sys, &sys->resources);
if (!sys->bus)
panic("PCI: unable to scan bus!");
busnr = sys->bus->subordinate + 1;
-list_add(&sys->node, &hw->buses);
+list_add(&sys->node, head);
} else {
kfree(sys);
if (ret < 0)
@@ -459,19 +475,18 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
void __init pci_common_init(struct hw_pci *hw)
{
struct pci_sys_data *sys;
-INIT_LIST_HEAD(&hw->buses);
+LIST_HEAD(head);
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
if (hw->preinit)
hw->preinit();
-pcibios_init_hw(hw);
+pcibios_init_hw(hw, &head);
if (hw->postinit)
hw->postinit();
pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
-list_for_each_entry(sys, &hw->buses, node) {
+list_for_each_entry(sys, &head, node) {
struct pci_bus *bus = sys->bus;
if (!pci_has_flag(PCI_PROBE_ONLY)) {

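With this change, a platform that leaves hw->swizzle NULL gets the standard PCI swizzle via pci_common_swizzle(). A condensed sketch of that standard behaviour (function name invented; the real implementation lives in drivers/pci):

/*
 * Condensed sketch of the standard swizzle: rotate the INTx pin by
 * the device's slot number at each PCI-to-PCI bridge on the way up,
 * then report the slot of the device sitting on the root bus.
 */
static u8 standard_swizzle_sketch(struct pci_dev *dev, u8 *pinp)
{
	while (dev->bus->parent) {
		*pinp = (((*pinp - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
		dev = dev->bus->self;		/* step up to the bridge */
	}
	return PCI_SLOT(dev->devfn);		/* slot on the root bus */
}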
arch/arm/kernel/irq.c

@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
}
c = irq_data_get_irq_chip(d);
-if (c->irq_set_affinity)
-c->irq_set_affinity(d, affinity, true);
-else
+if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+cpumask_copy(d->affinity, affinity);
return ret;
}
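The fix: d->affinity is now updated only when the chip callback accepted the mask and the mask was actually rewritten (ret), rather than ignoring the return value and letting the stored affinity go stale. A minimal sketch of the genirq contract relied on here (helper name invented; constants from linux/irq.h):

/*
 * Sketch of the genirq contract: the chip callback's return value
 * says whether the core must mirror the mask into irq_data itself.
 */
static void apply_affinity_sketch(struct irq_data *d,
				  const struct cpumask *mask)
{
	struct irq_chip *c = irq_data_get_irq_chip(d);

	switch (c->irq_set_affinity(d, mask, true)) {
	case IRQ_SET_MASK_OK:		/* core copies the mask */
		cpumask_copy(d->affinity, mask);
		break;
	case IRQ_SET_MASK_OK_NOCOPY:	/* chip already recorded it */
		break;
	}
}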

arch/arm/kernel/ptrace.c

@@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-#ifdef __ARMEB__
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
-#else
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
-#endif
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
-/*
-* Save IP. IP is used to denote syscall entry/exit:
-* IP = 0 -> entry, = 1 -> exit
-*/
-ip = regs->ARM_ip;
-regs->ARM_ip = why;
-if (!ip)
+if (why)
audit_syscall_exit(regs);
else
-audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
+audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
current_thread_info()->syscall = scno;
+/*
+* IP is used to denote syscall entry/exit:
+* IP = 0 -> entry, =1 -> exit
+*/
+ip = regs->ARM_ip;
+regs->ARM_ip = why;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)

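The why value parked in ARM_ip is also visible to the tracing parent. A hypothetical user-space sketch (helper name invented) of how a PTRACE_SYSCALL tracer can tell the two stops apart:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>

/*
 * Hypothetical tracer-side helper: at a PTRACE_SYSCALL stop, the
 * traced task's ip register holds the `why` value written above
 * (0 = syscall entry, 1 = syscall exit).
 */
static int stop_is_syscall_exit(pid_t pid)
{
	struct user_regs regs;		/* ARM: uregs[0..15] + cpsr */

	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
	return regs.uregs[12] != 0;	/* uregs[12] is ip (r12) */
}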
arch/arm/kernel/signal.c

@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
-struct thread_info *thread = current_thread_info();
-struct vfp_hard_struct *h = &thread->vfpstate.hard;
const unsigned long magic = VFP_MAGIC;
const unsigned long size = VFP_STORAGE_SIZE;
int err = 0;
-vfp_sync_hwstate(thread);
__put_user_error(magic, &frame->magic, err);
__put_user_error(size, &frame->size, err);
-/*
-* Copy the floating point registers. There can be unused
-* registers see asm/hwcap.h for details.
-*/
-err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-sizeof(h->fpregs));
-/*
-* Copy the status and control register.
-*/
-__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
if (err)
return -EFAULT;
-/*
-* Copy the exception registers.
-*/
-__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-return err ? -EFAULT : 0;
+return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}
static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
-struct thread_info *thread = current_thread_info();
-struct vfp_hard_struct *h = &thread->vfpstate.hard;
unsigned long magic;
unsigned long size;
-unsigned long fpexc;
int err = 0;
__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
-vfp_flush_hwstate(thread);
-/*
-* Copy the floating point registers. There can be unused
-* registers see asm/hwcap.h for details.
-*/
-err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-sizeof(h->fpregs));
-/*
-* Copy the status and control register.
-*/
-__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-/*
-* Sanitise and restore the exception registers.
-*/
-__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-/* Ensure the VFP is enabled. */
-fpexc |= FPEXC_EN;
-/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-h->fpexc = fpexc;
-__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-return err ? -EFAULT : 0;
+return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
#endif
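Both open-coded copy paths above collapse into vfp_preserve_user_clear_hwstate() and vfp_restore_user_hwstate(), keeping knowledge of the VFP hardware state inside the VFP code. Judging by the removed lines, the restore helper is still expected to sanitise the user-supplied FPEXC; a sketch of that step (helper name invented):

/*
 * Hypothetical helper illustrating the FPEXC sanitising that moved
 * out of this file: a signal frame must never be able to hand the
 * kernel a disabled VFP or stale exception state.
 */
static u32 sanitise_user_fpexc(u32 fpexc)
{
	fpexc |= FPEXC_EN;			/* re-enable the VFP */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);	/* clear the exception flag,
						   invalidate FPINST2 */
	return fpexc;
}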

arch/arm/kernel/smp.c

@@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
printk("CPU%u: Booted secondary processor\n", cpu);
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
printk("CPU%u: Booted secondary processor\n", cpu);
cpu_init();
preempt_disable();
trace_hardirqs_off();
@@ -454,6 +454,9 @@ static struct local_timer_ops *lt_ops;
#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
+if (!is_smp() || !setup_max_cpus)
+return -ENXIO;
if (lt_ops)
return -EBUSY;
@@ -510,10 +513,6 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable();
local_irq_disable();
-#ifdef CONFIG_HOTPLUG_CPU
-platform_cpu_kill(cpu);
-#endif
while (1)
cpu_relax();
}
@@ -576,17 +575,25 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+unsigned int cpu;
+for_each_cpu(cpu, mask)
+platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
void smp_send_stop(void)
{
unsigned long timeout;
+struct cpumask mask;
-if (num_online_cpus() > 1) {
-struct cpumask mask;
-cpumask_copy(&mask, cpu_online_mask);
-cpumask_clear_cpu(smp_processor_id(), &mask);
-smp_cross_call(&mask, IPI_CPU_STOP);
-}
+cpumask_copy(&mask, cpu_online_mask);
+cpumask_clear_cpu(smp_processor_id(), &mask);
+smp_cross_call(&mask, IPI_CPU_STOP);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
@@ -595,6 +602,8 @@ void smp_send_stop(void)
if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs\n");
+smp_kill_cpus(&mask);
}
/*

arch/arm/kernel/smp_twd.c

@@ -118,14 +118,10 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
* The twd clock events must be reprogrammed to account for the new
* frequency. The timer is local to a cpu, so cross-call to the
* changing cpu.
-*
-* Only wait for it to finish, if the cpu is active to avoid
-* deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during
-* booting of that cpu.
*/
if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
smp_call_function_single(freqs->cpu, twd_update_frequency,
-NULL, cpu_active(freqs->cpu));
+NULL, 1);
return NOTIFY_OK;
}

arch/arm/kernel/sys_arm.c

@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
"Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs),
"Ir" (sizeof(regs))
: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
out:
return ret;
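Adding r8 and r9 to the clobber list stops the compiler from keeping live values in registers that the code invoked from this asm block may overwrite. An illustrative, hypothetical example of the same rule:

/*
 * Hypothetical example: any register the asm body (or anything it
 * calls) may modify must appear in the clobber list, otherwise the
 * compiler is free to cache a live value there across the statement.
 */
static inline unsigned long add_one(unsigned long x)
{
	unsigned long ret;

	asm volatile(
		"mov	r8, %1\n\t"	/* scratch use of r8 ... */
		"add	%0, r8, #1"
		: "=r" (ret)
		: "r" (x)
		: "r8");		/* ... so r8 must be clobbered */

	return ret;
}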