Merge branch 'x86/pti' into x86/mm, to pick up dependencies

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date: 2018-03-12 12:10:03 +01:00
69 changed files with 912 additions and 473 deletions

include/linux/compiler-clang.h

@@ -27,3 +27,8 @@
 #if __has_feature(address_sanitizer)
 #define __SANITIZE_ADDRESS__
 #endif
+
+/* Clang doesn't have a way to turn it off per-function, yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
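Because clang also defines __GNUC__, compiler-gcc.h is processed first and may have defined __noretpoline; after this #undef the macro simply does not exist under clang, so consumers must test for the macro itself rather than for RETPOLINE. A minimal consumer sketch, where MY_NORETPOLINE and dispatch() are hypothetical names, not from the tree:

#ifdef __noretpoline
#define MY_NORETPOLINE __noretpoline	/* GCC retpoline build: opt this function out */
#else
#define MY_NORETPOLINE			/* clang, or RETPOLINE disabled: no-op */
#endif

static int MY_NORETPOLINE dispatch(int (*fn)(int), int x)
{
	return fn(x);	/* stays a plain indirect call only under GCC */
}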

include/linux/compiler-gcc.h

@@ -93,6 +93,10 @@
 #define __weak		__attribute__((weak))
 #define __alias(symbol)	__attribute__((alias(#symbol)))
 
+#ifdef RETPOLINE
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#endif
+
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
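The attribute the macro wraps can be observed outside the kernel; a standalone sketch, assuming GCC 7.3+ built with retpoline support (the file and function names are hypothetical):

/* Build with: gcc -O2 -mindirect-branch=thunk demo.c
 * (add -fcf-protection=none on toolchains that enable CET by default).
 * The volatile pointer keeps GCC from devirtualizing the calls. */
static int add_one(int x) { return x + 1; }
static int (* volatile fp)(int) = add_one;

__attribute__((indirect_branch("keep")))
static int call_kept(int x)
{
	return fp(x);		/* emitted as a real indirect call */
}

static int call_thunked(int x)
{
	return fp(x);		/* routed through __x86_indirect_thunk_* */
}

int main(void)
{
	return call_kept(1) + call_thunked(2);
}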

include/linux/init.h

@@ -6,10 +6,10 @@
 #include <linux/types.h>
 
 /* Built-in __init functions needn't be compiled with retpoline */
-#if defined(RETPOLINE) && !defined(MODULE)
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
 #else
-#define __noretpoline
+#define __noinitretpoline
 #endif
 
 /* These macros are used to mark some functions or
@@ -47,7 +47,7 @@
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold __latent_entropy __noretpoline
+#define __init		__section(.init.text) __cold __latent_entropy __noinitretpoline
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
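At the call site nothing changes; a minimal sketch of a built-in driver (mydrv_init is hypothetical): with a retpoline-capable GCC in a non-modular build, the function below is compiled without retpolines, since .init.text is discarded before userspace runs and needs no Spectre v2 mitigation, while under clang or in a module __noinitretpoline expands to nothing.

#include <linux/init.h>
#include <linux/module.h>

static int __init mydrv_init(void)
{
	return 0;	/* body compiled with __noinitretpoline applied */
}
module_init(mydrv_init);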

include/linux/jump_label.h

@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
+extern void jump_label_invalidate_init(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -198,6 +199,8 @@ static __always_inline void jump_label_init(void)
 	static_key_initialized = true;
 }
 
+static inline void jump_label_invalidate_init(void) {}
+
 static __always_inline bool static_key_false(struct static_key *key)
 {
 	if (unlikely(static_key_count(key) > 0))
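jump_label_invalidate_init() lets the core mark patch sites that live in init text as dead once init memory is freed, so flipping a key later skips them; the inline stub above keeps kernels without jump label support building. A sketch of the kind of static-key user whose branches this protects (the mydrv_* names are hypothetical):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(mydrv_fastpath);

void mydrv_enable_fastpath(void)
{
	static_branch_enable(&mydrv_fastpath);	/* patches every live site */
}

int mydrv_process(int x)
{
	if (static_branch_unlikely(&mydrv_fastpath))
		return x * 2;	/* nop -> jmp once the key is enabled */
	return x;
}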

include/linux/kernel.h

@@ -472,6 +472,7 @@ extern bool parse_option_str(const char *str, const char *option);
 extern char *next_arg(char *args, char **param, char **val);
 
 extern int core_kernel_text(unsigned long addr);
+extern int init_kernel_text(unsigned long addr);
 extern int core_kernel_data(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
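init_kernel_text() was previously file-local in kernel/extable.c; making the declaration visible lets callers such as the jump label core distinguish soon-to-be-freed init text from permanent kernel text. A sketch of the check this enables (addr_is_discardable is hypothetical):

#include <linux/kernel.h>

/* True for addresses in the init range, i.e. code that is freed after
 * boot and must not be patched or traced afterwards. */
static bool addr_is_discardable(unsigned long addr)
{
	return init_kernel_text(addr);
}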

include/linux/nospec.h

@@ -5,6 +5,7 @@
 
 #ifndef _LINUX_NOSPEC_H
 #define _LINUX_NOSPEC_H
+#include <asm/barrier.h>
 
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
@@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 }
 #endif
 
-/*
- * Warn developers about inappropriate array_index_nospec() usage.
- *
- * Even if the CPU speculates past the WARN_ONCE branch, the
- * sign bit of @index is taken into account when generating the
- * mask.
- *
- * This warning is compiled out when the compiler can infer that
- * @index and @size are less than LONG_MAX.
- */
-#define array_index_mask_nospec_check(index, size)			\
-({									\
-	if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,		\
-	    "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
-		_mask = 0;						\
-	else								\
-		_mask = array_index_mask_nospec(index, size);		\
-	_mask;								\
-})
-
 /*
  * array_index_nospec - sanitize an array index after a bounds check
  *
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 ({									\
 	typeof(index) _i = (index);					\
 	typeof(size) _s = (size);					\
-	unsigned long _mask = array_index_mask_nospec_check(_i, _s);	\
+	unsigned long _mask = array_index_mask_nospec(_i, _s);		\
 									\
 	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
 	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
 									\
-	_i &= _mask;							\
-	_i;								\
+	(typeof(_i)) (_i & _mask);					\
 })
 #endif /* _LINUX_NOSPEC_H */
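Taken together, a call site now reads as below; a usage sketch (hypothetical lookup, not from the tree). The WARN_ONCE range check is gone, <asm/barrier.h> makes the header self-contained, and the result comes back as typeof(_i), so the macro also accepts a const-qualified index without the old _i &= _mask write-back failing to compile.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/nospec.h>

static int table[16];

int mydrv_lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;

	/* Even if the CPU speculates past the bounds check above, idx is
	 * clamped to 0 on the mispredicted path, so no out-of-bounds load
	 * leaves a cache footprint. */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));

	return table[idx];
}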