Merge branch 'core/percpu' into x86/core
Conflicts: kernel/irq/handle.c
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
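For reference, a minimal sketch (not part of this commit) of the boot-time ordering these hooks are designed for; the early_irq_init() body below is illustrative only, and the exact call sites are not shown in this excerpt.

int __init early_irq_init(void)
{
	/* let the architecture report how many IRQs it actually needs */
	arch_probe_nr_irqs();

	/* ... generic irq_desc setup ... */

	/* architecture hook for per-descriptor chip data (e.g. irq_cfg on x86) */
	return arch_early_irq_init();
}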
@@ -182,11 +182,11 @@ struct irq_desc {
	unsigned int		irqs_unhandled;
	spinlock_t		lock;
#ifdef CONFIG_SMP
	cpumask_t		affinity;
	cpumask_var_t		affinity;
	unsigned int		cpu;
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_t		pending_mask;
	cpumask_var_t		pending_mask;
#endif
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
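A hedged sketch (not from this commit) of what the cpumask_t to cpumask_var_t switch means for code touching these fields; the helper name is hypothetical. With CONFIG_CPUMASK_OFFSTACK=y the variable is a pointer that must be allocated before use, otherwise it degenerates to the old embedded mask.

static int hypothetical_setup_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* allocates storage with CONFIG_CPUMASK_OFFSTACK=y, no-op otherwise */
	if (!alloc_cpumask_var(&desc->affinity, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(desc->affinity);	/* the cpumask_* accessors are unchanged */
	return 0;
}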
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#endif /* !CONFIG_S390 */

#ifdef CONFIG_SMP
/**
 * init_alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc: pointer to irq_desc struct
 * @cpu: cpu which will be handling the cpumasks
 * @boot: true if need bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 * Side effect: affinity has all bits set, pending_mask has all bits clear.
 */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
					 bool boot)
{
	int node;

	if (boot) {
		alloc_bootmem_cpumask_var(&desc->affinity);
		cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
		alloc_bootmem_cpumask_var(&desc->pending_mask);
		cpumask_clear(desc->pending_mask);
#endif
		return true;
	}

	node = cpu_to_node(cpu);

	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
		return false;
	cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
	return true;
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc: pointer to old irq_desc struct
 * @new_desc: pointer to new irq_desc struct
 *
 * Ensures affinity and pending_mask are copied to the new irq_desc.
 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

#else /* !CONFIG_SMP */

static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
					 bool boot)
{
	return true;
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

#endif /* CONFIG_SMP */

#endif /* _LINUX_IRQ_H */
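A sketch (assumption, modelled on the sparse-IRQ descriptor move path) of how a caller is expected to combine the two new helpers when an irq_desc is re-created on a different node; the function name is illustrative and not part of this commit.

static bool copy_one_irq_desc(struct irq_desc *old_desc,
			      struct irq_desc *new_desc, int cpu)
{
	memcpy(new_desc, old_desc, sizeof(*new_desc));

	/* allocate fresh cpumasks for the copy (boot = false: no bootmem) */
	if (!init_alloc_desc_masks(new_desc, cpu, false))
		return false;

	/* then carry the old affinity/pending settings across */
	init_copy_desc_masks(old_desc, new_desc);
	return true;
}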
@@ -20,6 +20,7 @@

# define for_each_irq_desc_reverse(irq, desc)		\
	for (irq = nr_irqs - 1; irq >= 0; irq--)

#else /* CONFIG_GENERIC_HARDIRQS */

extern int nr_irqs;
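A small usage sketch (not part of this diff): iterating IRQ numbers from highest to lowest with the macro above. In this !CONFIG_GENERIC_HARDIRQS variant only irq is advanced; the sparse-IRQ variant also provides desc.

static void list_irqs_backwards(void)
{
	struct irq_desc *desc;	/* required by the macro, unused in this variant */
	int irq;

	for_each_irq_desc_reverse(irq, desc)
		printk(KERN_DEBUG "irq %d\n", irq);
}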
@@ -49,4 +49,5 @@
#define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
#define INOTIFYFS_SUPER_MAGIC	0x2BAD1DEA

#define STACK_END_MAGIC		0x57AC6E9D
#endif /* __LINUX_MAGIC_H__ */
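A sketch (assumption) of the intended use of STACK_END_MAGIC: the word at the very end of a task's kernel stack is stamped with the magic when the stack is set up, and overflow/fault paths can later test it. The helper names are hypothetical and the real call sites are not part of this excerpt.

static void hypothetical_stamp_stack(struct task_struct *tsk)
{
	unsigned long *stackend = end_of_stack(tsk);

	*stackend = STACK_END_MAGIC;	/* mark the lowest word of the stack */
}

static bool hypothetical_stack_overflowed(struct task_struct *tsk)
{
	return *end_of_stack(tsk) != STACK_END_MAGIC;	/* magic clobbered? */
}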
@@ -9,34 +9,39 @@
#include <asm/percpu.h>

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name)					\
	__attribute__((__section__(".data.percpu")))			\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#define PER_CPU_BASE_SECTION ".data.percpu"

#ifdef MODULE
#define SHARED_ALIGNED_SECTION ".data.percpu"
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_BASE_SECTION ".data"
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(SHARED_ALIGNED_SECTION)))		\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name		\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	__attribute__((__section__(".data.percpu.page_aligned")))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#else
#define DEFINE_PER_CPU(type, name)					\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU(type, name)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU(type, name)
#endif
#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
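A minimal usage sketch (not part of this commit) of a per-CPU variable defined with the macros above and accessed through the long-standing get_cpu_var()/put_cpu_var() helpers; the variable name is hypothetical.

static DEFINE_PER_CPU(unsigned long, hypothetical_event_count);

static void count_event(void)
{
	/* get_cpu_var() disables preemption while this CPU's copy is used */
	get_cpu_var(hypothetical_event_count)++;
	put_cpu_var(hypothetical_event_count);
}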
@@ -1161,10 +1161,9 @@ struct task_struct {
	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with

@@ -2070,6 +2069,19 @@ static inline int object_is_on_stack(void *obj)

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
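A sketch (assumption, not part of this diff) of how a CONFIG_DEBUG_STACK_USAGE consumer might report the low-water mark that stack_not_used() computes; the reporting function and threshold are illustrative.

static void report_stack_usage(struct task_struct *tsk)
{
	unsigned long free = stack_not_used(tsk);	/* bytes never touched */

	if (free < 1024)
		printk(KERN_WARNING "%s (%d): only %lu bytes of stack never used\n",
		       tsk->comm, task_pid_nr(tsk), free);
}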
include/linux/stackprotector.h (new file, 16 lines)
@@ -0,0 +1,16 @@
#ifndef _LINUX_STACKPROTECTOR_H
#define _LINUX_STACKPROTECTOR_H 1

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/random.h>

#ifdef CONFIG_CC_STACKPROTECTOR
# include <asm/stackprotector.h>
#else
static inline void boot_init_stack_canary(void)
{
}
#endif

#endif
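A sketch (assumption) of where the new hook is meant to be invoked: very early in boot, so the init task gets a seeded canary before much code runs. The call site shown below is illustrative and not included in this excerpt; with !CONFIG_CC_STACKPROTECTOR the stub above makes it a no-op.

#include <linux/stackprotector.h>

asmlinkage void __init start_kernel(void)
{
	/* ... earliest setup ... */

	/* seed the -fstack-protector canary for the init task */
	boot_init_stack_canary();

	/* ... rest of boot ... */
}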
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_siblings
#define topology_core_siblings(cpu)	cpumask_of_cpu(cpu)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)	cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)	cpumask_of(cpu)
#endif

#endif /* _LINUX_TOPOLOGY_H */
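A usage sketch (not from this commit) of the new cpumask-returning topology helper: walking the SMT siblings of a CPU with for_each_cpu(); the function name is hypothetical.

#include <linux/topology.h>

static void print_thread_siblings(int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_thread_cpumask(cpu))
		printk(KERN_INFO "CPU%d: thread sibling CPU%d\n", cpu, sibling);
}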