Merge branch 'x86-nuke386-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull "Nuke 386-DX/SX support" from Ingo Molnar: "This tree removes ancient-386-CPUs support and thus zaps quite a bit of complexity: 24 files changed, 56 insertions(+), 425 deletions(-) ... which complexity has plagued us with extra work whenever we wanted to change SMP primitives, for years. Unfortunately there's a nostalgic cost: your old original 386 DX33 system from early 1991 won't be able to boot modern Linux kernels anymore. Sniff." I'm not sentimental. Good riddance. * 'x86-nuke386-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86, 386 removal: Document Nx586 as a 386 and thus unsupported x86, cleanups: Simplify sync_core() in the case of no CPUID x86, 386 removal: Remove CONFIG_X86_POPAD_OK x86, 386 removal: Remove CONFIG_X86_WP_WORKS_OK x86, 386 removal: Remove CONFIG_INVLPG x86, 386 removal: Remove CONFIG_BSWAP x86, 386 removal: Remove CONFIG_XADD x86, 386 removal: Remove CONFIG_CMPXCHG x86, 386 removal: Remove CONFIG_M386 from Kconfig
arch/x86/include/asm/atomic.h
@@ -172,23 +172,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-#ifdef CONFIG_M386
-	int __i;
-	unsigned long flags;
-	if (unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
 	return i + xadd(&v->counter, i);
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	raw_local_irq_save(flags);
-	__i = atomic_read(v);
-	atomic_set(v, i + __i);
-	raw_local_irq_restore(flags);
-	return i + __i;
-#endif
 }
 
 /**
arch/x86/include/asm/cmpxchg_32.h
@@ -34,9 +34,7 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		     : "memory");
 }
 
-#ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#endif
 
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n)						\
@@ -73,59 +71,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 	return prev;
 }
 
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-					unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return cmpxchg_386_u8(ptr, old, new);
-	case 2:
-		return cmpxchg_386_u16(ptr, old, new);
-	case 4:
-		return cmpxchg_386_u32(ptr, old, new);
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#endif
-
 #ifndef CONFIG_X86_CMPXCHG64
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary
arch/x86/include/asm/cpufeature.h
@@ -313,12 +313,6 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 #define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
 
-#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
-# define cpu_has_invlpg		1
-#else
-# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
-#endif
-
 #ifdef CONFIG_X86_64
 
 #undef cpu_has_vme
arch/x86/include/asm/futex.h
@@ -55,12 +55,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
-	/* Real i386 machines can only support FUTEX_OP_SET */
-	if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
-		return -ENOSYS;
-#endif
-
 	pagefault_disable();
 
 	switch (op) {
@@ -118,12 +112,6 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret = 0;
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
-	/* Real i386 machines have no cmpxchg instruction */
-	if (boot_cpu_data.x86 == 3)
-		return -ENOSYS;
-#endif
-
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
arch/x86/include/asm/local.h
@@ -124,27 +124,11 @@ static inline int local_add_negative(long i, local_t *l)
  */
 static inline long local_add_return(long i, local_t *l)
 {
-	long __i;
-#ifdef CONFIG_M386
-	unsigned long flags;
-	if (unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
-	__i = i;
+	long __i = i;
 	asm volatile(_ASM_XADD "%0, %1;"
 		     : "+r" (i), "+m" (l->a.counter)
 		     : : "memory");
 	return i + __i;
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	local_irq_save(flags);
-	__i = local_read(l);
-	local_set(l, i + __i);
-	local_irq_restore(flags);
-	return i + __i;
-#endif
 }
 
 static inline long local_sub_return(long i, local_t *l)
arch/x86/include/asm/module.h
@@ -5,8 +5,6 @@
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
-#elif defined CONFIG_M386
-#define MODULE_PROC_FAMILY "386 "
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
arch/x86/include/asm/percpu.h
@@ -406,7 +406,6 @@ do {									\
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
 
-#ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
@@ -421,8 +420,6 @@ do {									\
 #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#endif /* !CONFIG_M386 */
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
 ({									\
arch/x86/include/asm/processor.h
@@ -672,18 +672,29 @@ static inline void sync_core(void)
 {
 	int tmp;
 
-#if defined(CONFIG_M386) || defined(CONFIG_M486)
-	if (boot_cpu_data.x86 < 5)
-		/* There is no speculative execution.
-		 * jmp is a barrier to prefetching. */
-		asm volatile("jmp 1f\n1:\n" ::: "memory");
-	else
-#endif
-	/* cpuid is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified. */
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+#ifdef CONFIG_M486
+	/*
+	 * Do a CPUID if available, otherwise do a jump. The jump
+	 * can conveniently enough be the jump around CPUID.
+	 */
+	asm volatile("cmpl %2,%1\n\t"
+		     "jl 1f\n\t"
+		     "cpuid\n"
+		     "1:"
+		     : "=a" (tmp)
+		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
+#else
+	/*
+	 * CPUID is a barrier to speculative execution.
+	 * Prefetched instructions are automatically
+	 * invalidated when modified.
+	 */
+	asm volatile("cpuid"
+		     : "=a" (tmp)
+		     : "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
+#endif
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
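With the 386 gone, only a CONFIG_M486 build still has to cope with processors that predate the CPUID instruction, and the rewritten sync_core() above folds both cases into one asm block: it compares boot_cpu_data.cpuid_level (negative when CPUID is absent) against zero and conditionally jumps around the CPUID, with that same jump doubling as the prefetch-queue barrier a pre-CPUID 486 needs. A C-level paraphrase of the new asm (illustrative, not kernel code):

    if (boot_cpu_data.cpuid_level >= 0)
            cpuid(1);       /* serializing: flushes prefetched instructions */
    /* else: the taken jump around CPUID is itself the required barrier */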
arch/x86/include/asm/swab.h
@@ -6,22 +6,7 @@
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
 {
-#ifdef __i386__
-# ifdef CONFIG_X86_BSWAP
-	asm("bswap %0" : "=r" (val) : "0" (val));
-# else
-	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes */
-	    "rorl $16,%0\n\t"	/* swap words */
-	    "xchgb %b0,%h0"	/* swap higher bytes */
-	    : "=q" (val)
-	    : "0" (val));
-# endif
-
-#else /* __i386__ */
-	asm("bswapl %0"
-	    : "=r" (val)
-	    : "0" (val));
-#endif
+	asm("bswapl %0" : "=r" (val) : "0" (val));
 	return val;
 }
 #define __arch_swab32 __arch_swab32
@@ -37,22 +22,12 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
 		__u64 u;
 	} v;
 	v.u = val;
-# ifdef CONFIG_X86_BSWAP
 	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-# else
-	v.s.a = __arch_swab32(v.s.a);
-	v.s.b = __arch_swab32(v.s.b);
-	asm("xchgl %0,%1"
-	    : "=r" (v.s.a), "=r" (v.s.b)
-	    : "0" (v.s.a), "1" (v.s.b));
-# endif
 	return v.u;
 #else /* __i386__ */
-	asm("bswapq %0"
-	    : "=r" (val)
-	    : "0" (val));
+	asm("bswapq %0" : "=r" (val) : "0" (val));
 	return val;
 #endif
 }
arch/x86/include/asm/tlbflush.h
@@ -56,10 +56,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	if (cpu_has_invlpg)
-		__flush_tlb_single(addr);
-	else
-		__flush_tlb();
+	__flush_tlb_single(addr);
 }
 
 #define TLB_FLUSH_ALL	-1UL
arch/x86/include/asm/uaccess.h
@@ -237,8 +237,6 @@ extern void __put_user_2(void);
 extern void __put_user_4(void);
 extern void __put_user_8(void);
 
-#ifdef CONFIG_X86_WP_WORKS_OK
-
 /**
  * put_user: - Write a simple value into user space.
  * @x: Value to copy to user space.
@@ -326,29 +324,6 @@ do {									\
 	}								\
 } while (0)
 
-#else
-
-#define __put_user_size(x, ptr, size, retval, errret)			\
-do {									\
-	__typeof__(*(ptr))__pus_tmp = x;				\
-	retval = 0;							\
-									\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
-		retval = errret;					\
-} while (0)
-
-#define put_user(x, ptr)						\
-({									\
-	int __ret_pu;							\
-	__typeof__(*(ptr))__pus_tmp = x;				\
-	__ret_pu = 0;							\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,			\
-				       sizeof(*(ptr))) != 0))		\
-		__ret_pu = -EFAULT;					\
-	__ret_pu;							\
-})
-#endif
-
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
 #define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
@@ -543,29 +518,12 @@ struct __large_struct { unsigned long buf[100]; };
 	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
 } while (0)
 
-#ifdef CONFIG_X86_WP_WORKS_OK
-
 #define put_user_try		uaccess_try
 #define put_user_catch(err)	uaccess_catch(err)
 
 #define put_user_ex(x, ptr)	\
 	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#else /* !CONFIG_X86_WP_WORKS_OK */
-
-#define put_user_try		do {		\
-	int __uaccess_err = 0;
-
-#define put_user_catch(err)			\
-	(err) |= __uaccess_err;			\
-} while (0)
-
-#define put_user_ex(x, ptr)	do {		\
-	__uaccess_err |= __put_user(x, ptr);	\
-} while (0)
-
-#endif /* CONFIG_X86_WP_WORKS_OK */
-
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
 extern __must_check long