Merge branch 'devel-stable' into for-linus
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
@@ -15,6 +15,7 @@
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
+	u32		cpuid;
 #ifdef CONFIG_SMP
 	unsigned int	loops_per_jiffy;
 #endif
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
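As a side note (example values are made up, not part of the commit): these macros split an MPIDR register value into its affinity fields, e.g.:

	u32 mpidr   = 0x80000102;			/* hypothetical MPIDR */
	u32 cpu     = MPIDR_AFFINITY_LEVEL(mpidr, 0);	/* 0x02: CPU within cluster */
	u32 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);	/* 0x01: cluster */
	u32 hwid    = mpidr & MPIDR_HWID_BITMASK;	/* 0x000102: Aff2..Aff0 only */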
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (val & 1) {
-		val = LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }
 
 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (!(val & 1)) {
-		val = ~LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR 112
 
 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+	asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)
 
-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+	asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
 struct notifier_block;
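Hedged usage note (not part of the diff): the extra N argument names CRn explicitly instead of hard-coding c0, so debug registers outside CRn=c0 become reachable. For example, DBGDIDR (p14, 0, c0, c0, 0) would now be read as:

	u32 didr;
	ARM_DBG_READ(c0, c0, 0, didr);	/* old form was ARM_DBG_READ(c0, 0, didr) */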
@@ -5,18 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	unsigned int id;
-	raw_spinlock_t id_lock;
+	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm)	((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name) \
-	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
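Reading of the new layout (my gloss, not commit text): context.id is now a 64-bit value whose low ASID_BITS bits hold the hardware ASID and whose upper bits act as a generation number, which is why ASID(mm) masks with ~ASID_MASK:

	u64 id   = (3ULL << ASID_BITS) | 0x2a;	/* hypothetical: generation 3, ASID 0x2a */
	u64 asid = id & ~ASID_MASK;		/* 0x2a, what ASID(mm) extracts */
	u64 gen  = id & ASID_MASK;		/* 0x300, compared on rollover */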
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
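For intuition only (a hypothetical sketch of mine, not code from this commit or from arch/arm/mm/context.c): the out-of-line check_and_switch_context() replaces the IPI-based __new_context() scheme with a generation counter, so the common-case switch reduces to a comparison plus cpu_switch_mm():

	/* Hypothetical generation-based switch; all names below are made up. */
	static u64 sketch_asid_generation;	/* bumped on ASID-space rollover */

	static void sketch_switch(struct mm_struct *mm)
	{
		if ((mm->context.id ^ sketch_asid_generation) >> ASID_BITS)
			mm->context.id = sketch_new_asid(mm);	/* rollover path only */
		cpu_switch_mm(mm->pgd, mm);
	}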
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
arch/arm/include/asm/percpu.h (new file, 45 lines)
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+	/* Set TPIDRPRW */
+	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long off;
+	/* Read TPIDRPRW */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x)	do {} while(0)
+
+#endif	/* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
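A usage sketch (hedged; only set_my_cpu_offset()/__my_cpu_offset come from this file): each CPU seeds TPIDRPRW with its per-cpu offset early during bringup, after which the generic this_cpu accessors resolve through __my_cpu_offset with a single register read:

	/* Roughly what per-CPU init does; per_cpu_offset() is the generic helper. */
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/* Afterwards asm-generic/percpu.h expands this via __my_cpu_offset: */
	int *counter = this_cpu_ptr(&some_percpu_counter);	/* hypothetical per-cpu variable */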
@@ -21,4 +21,11 @@
 #define C(_x)			PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED	0xFFFF
 
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
@@ -115,6 +115,7 @@
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
 #define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER		(_AT(pteval_t, 1) << 8)
 #define L_PTE_XN		(_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
 
 /*
  * These are the memory types, defined to be compatible with
@@ -67,7 +67,8 @@
  * These bits overlap with the hardware bits but the naming is preserved for
  * consistency with the classic page table format.
  */
-#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
 
 /*
  * To be used in assembly code with the upper page attributes.
@@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel;
 
 #define _MOD_PROT(p, b)		__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
 
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
@@ -67,19 +67,19 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct hw_perf_event *hwc);
+					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
-	int		(*request_irq)(irq_handler_t handler);
-	void		(*free_irq)(void);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
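An illustrative, hypothetical backend fragment (not from this commit) just to show the new callback shapes, which now receive the perf_event or arm_pmu pointer instead of a bare counter index:

	/* Hypothetical PMU backend; demonstrates prototypes only. */
	static void demo_enable(struct perf_event *event)  { /* program the event's counter */ }
	static u32  demo_read(struct perf_event *event)    { return 0; /* read its counter */ }
	static void demo_start(struct arm_pmu *pmu)        { /* turn the PMU on */ }

	static struct arm_pmu demo_pmu = {
		.name		= "demo",
		.enable		= demo_enable,
		.read_counter	= demo_read,
		.start		= demo_start,
	};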
@@ -93,15 +93,11 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);
 
-u64 armpmu_event_update(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx);
+u64 armpmu_event_update(struct perf_event *event);
 
-int armpmu_event_set_period(struct perf_event *event,
-			    struct hw_perf_event *hwc,
-			    int idx);
+int armpmu_event_set_period(struct perf_event *event);
 
 int armpmu_map_event(struct perf_event *event,
 		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
@@ -15,6 +15,7 @@
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
@@ -24,6 +25,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }
 
 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }
 
 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H
 
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>
 
 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+	int cpu;
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (cpu_logical_map(cpu) == mpidr)
+			return cpu;
+	return -EINVAL;
+}
 
 #endif
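A small hypothetical usage sketch: mapping a hardware id (say, one parsed from a device-tree "reg" property) back to a logical CPU number:

	u32 hwid = 0x0102;	/* hypothetical MPIDR[23:0] */
	int cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);
	if (cpu == -EINVAL)
		pr_warn("no logical cpu for hwid 0x%x\n", hwid);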