Merge tag 'mips_5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS changes from Paul Burton:
 "Nothing too big or scary in here:

   - Support mremap() for the VDSO, primarily to allow CRIU to restore
     the VDSO to its checkpointed location (a brief userspace sketch
     follows this summary).

   - Restore the MIPS32 cBPF JIT, after having reverted the enablement
     of the eBPF JIT for MIPS32 systems in the 5.5 cycle.

   - Improve cop0 counter synchronization behaviour whilst onlining CPUs
     by running with interrupts disabled.

   - Better match FPU behaviour when emulating multiply-accumulate
     instructions on pre-r6 systems that implement IEEE754-2008 style
     MACs.

   - Loongson64 kernels now build using the MIPS64r2 ISA, allowing them
     to take advantage of instructions introduced by r2.

   - Support for the Ingenic X1000 SoC & the really nice little CU Neo
     development board that's using it.

   - Support for WMAC on GARDENA Smart Gateway devices.

   - Lots of cleanup & refactoring of SGI IP27 (Origin 2*) support in
     preparation for introducing IP35 (Origin 3*) support.

   - Various Kconfig & Makefile cleanups"
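
As a rough illustration of the vDSO item above, here is a minimal userspace
sketch of relocating the vDSO with mremap(), roughly what CRIU does on
restore. The mapping size and target address are made-up placeholders; real
CRIU derives both from the checkpoint image, and moving the vDSO out from
under a running libc only makes sense in such a restore scenario.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/auxv.h>
#include <sys/mman.h>

int main(void)
{
	/* Current vDSO base, as the kernel reported it via the aux vector. */
	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
	size_t len = 2 * 4096;              /* assumed vDSO size (placeholder) */
	void *target = (void *)0x77ff0000;  /* checkpointed address (placeholder) */

	/* MREMAP_FIXED requires MREMAP_MAYMOVE; the kernel-side change merged
	 * here is what allows this to succeed for the MIPS vDSO mapping. */
	void *moved = mremap(vdso, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, target);
	if (moved == MAP_FAILED)
		perror("mremap(vdso)");
	else
		printf("vDSO now at %p\n", moved);
	return 0;
}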

* tag 'mips_5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (60 commits)
  MIPS: PCI: Add detection of IOC3 on IO7, IO8, IO9 and Fuel
  MIPS: Loongson64: Disable exec hazard
  MIPS: Loongson64: Bump ISA level to MIPSR2
  MIPS: Make DIEI support as a config option
  MIPS: OCTEON: octeon-irq: fix spelling mistake "to" -> "too"
  MIPS: asm: local: add barriers for Loongson
  MIPS: Loongson64: Select mac2008 only feature
  MIPS: Add MAC2008 Support
  Revert "MIPS: Add custom serial.h with BASE_BAUD override for generic kernel"
  MIPS: sort MIPS and MIPS_GENERIC Kconfig selects alphabetically (again)
  MIPS: make CPU_HAS_LOAD_STORE_LR opt-out
  MIPS: generic: don't unconditionally select PINCTRL
  MIPS: don't explicitly select LIBFDT in Kconfig
  MIPS: sync-r4k: do slave counter synchronization with disabled HW interrupts
  MIPS: SGI-IP30: Check for valid pointer before using it
  MIPS: syscalls: fix indentation of the 'SYSNR' message
  MIPS: boot: fix typo in 'vmlinux.lzma.its' target
  MIPS: fix indentation of the 'RELOCS' message
  dt-bindings: Document loongson vendor-prefix
  MIPS: CU1000-Neo: Refresh defconfig to support HWMON and WiFi.
  ...
Committer: Linus Torvalds
Date: 2020-01-31 11:28:31 -08:00
95 changed files with 3889 additions and 1240 deletions

@@ -102,7 +102,12 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
if (fir & MIPS_FPIR_HAS2008) {
fcsr = read_32bit_cp1_register(CP1_STATUS);
fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
/*
* MAC2008 toolchain never landed in the real world, so we're only
* testing whether it can be disabled and don't try to enable
* it.
*/
fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | FPU_CSR_MAC2008);
write_32bit_cp1_register(CP1_STATUS, fcsr0);
fcsr0 = read_32bit_cp1_register(CP1_STATUS);
@@ -112,6 +117,15 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
write_32bit_cp1_register(CP1_STATUS, fcsr);
if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2)) {
/*
* The bit for MAC2008 might be reused by R6 in future,
* so we only test for R2-R5.
*/
if (fcsr0 & FPU_CSR_MAC2008)
c->options |= MIPS_CPU_MAC_2008_ONLY;
}
if (!(fcsr0 & FPU_CSR_NAN2008))
c->options |= MIPS_CPU_NAN_LEGACY;
if (fcsr1 & FPU_CSR_NAN2008)
@@ -1960,10 +1974,8 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_XBURST:
c->cputype = CPU_XBURST;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
__cpu_name[cpu] = "Ingenic JZRISC";
case PRID_IMP_XBURST_REV1:
/*
* The XBurst core by default attempts to avoid branch target
* buffer lookups by detecting & special casing loops. This
@@ -1971,36 +1983,45 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
* Set cp0 config7 bit 4 to disable this feature.
*/
set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
switch (c->processor_id & PRID_COMP_MASK) {
/*
* XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report
* themselves as MIPS32r2 compatible in their config0 register, but
* they don't actually support this ISA.
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
break;
/*
* XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D1 have an
* abandoned huge page TLB mode. This mode is not compatible with the
* MIPS standard; it causes a TLB miss and an infinite loop (line 21
* in tlb-funcs.S) when starting the init process. After chip reset
* the default is HPTLB mode, so write 0xa9000000 to cp0 register 5
* sel 4 to switch back to VTLB mode and avoid getting stuck.
*/
case PRID_COMP_INGENIC_D1:
write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
break;
default:
break;
}
/* fall-through */
case PRID_IMP_XBURST_REV2:
c->cputype = CPU_XBURST;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
__cpu_name[cpu] = "Ingenic XBurst";
break;
default:
panic("Unknown Ingenic Processor ID!");
break;
}
switch (c->processor_id & PRID_COMP_MASK) {
/*
* XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D1 have an
* abandoned huge page TLB mode. This mode is not compatible with the
* MIPS standard; it causes a TLB miss and an infinite loop (line 21
* in tlb-funcs.S) when starting the init process. After chip reset
* the default is HPTLB mode, so write 0xa9000000 to cp0 register 5
* sel 4 to switch back to VTLB mode and avoid getting stuck.
*/
case PRID_COMP_INGENIC_D1:
write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
break;
/*
* XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report
* themselves as MIPS32r2 compatible in their config0 register, but
* they don't actually support this ISA.
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
break;
default:
break;
}
}
static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
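
Stepping back to the cpu_set_fpu_2008() hunk at the top of this file: the
MAC2008 probe is simply "try to clear the bit, read it back, see whether it
stuck". A hedged stand-alone sketch of that idiom, using the same CP1 helpers
the file already uses (the function name is hypothetical):

/*
 * Minimal sketch of the write/read-back probe applied to FPU_CSR_MAC2008
 * above. Kernel context is assumed; the helper name is illustrative only.
 */
static bool fcsr_bit_is_sticky(unsigned int bit)
{
	unsigned int fcsr = read_32bit_cp1_register(CP1_STATUS);
	unsigned int probe = fcsr & ~bit;		/* attempt to clear the bit */

	write_32bit_cp1_register(CP1_STATUS, probe);
	probe = read_32bit_cp1_register(CP1_STATUS);	/* see what actually stuck */
	write_32bit_cp1_register(CP1_STATUS, fcsr);	/* restore the original FCSR */

	return (probe & bit) != 0;	/* still set: the bit cannot be disabled */
}

If this returns true for FPU_CSR_MAC2008 on an R2-R5 core, the hunk above
flags the CPU as MIPS_CPU_MAC_2008_ONLY.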

@@ -515,8 +515,7 @@ static void __init request_crashkernel(struct resource *res)
ret = request_resource(res, &crashk_res);
if (!ret)
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
(unsigned long)((crashk_res.end -
crashk_res.start + 1) >> 20),
(unsigned long)(resource_size(&crashk_res) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
@@ -698,8 +697,7 @@ static void __init arch_mem_init(char **cmdline_p)
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
memblock_reserve(crashk_res.start,
crashk_res.end - crashk_res.start + 1);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
#endif
device_tree_init();
sparse_init();
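
Both setup.c hunks above replace the open-coded crashkernel size computation
with resource_size(), which for an inclusive [start, end] range evaluates to
end - start + 1. A tiny sketch of the equivalence, with made-up bounds:

/* Sketch only: what resource_size() yields for an inclusive range.
 * The example resource and its bounds are made up. */
#include <linux/ioport.h>
#include <linux/printk.h>

static void crashk_size_example(void)
{
	struct resource res = {
		.start = 0x04000000,	/* 64 MiB boundary */
		.end   = 0x07ffffff,	/* inclusive end */
	};

	/* resource_size(&res) == res.end - res.start + 1 == 0x04000000 (64 MiB) */
	pr_info("crashkernel example: %lluMB at %lluMB\n",
		(unsigned long long)(resource_size(&res) >> 20),
		(unsigned long long)(res.start >> 20));
}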

@@ -90,6 +90,9 @@ void synchronise_count_master(int cpu)
void synchronise_count_slave(int cpu)
{
int i;
unsigned long flags;
local_irq_save(flags);
/*
* Not every cpu is online at the time this gets called,
@@ -113,5 +116,7 @@ void synchronise_count_slave(int cpu)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
local_irq_restore(flags);
}
#undef NR_LOOPS
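
The hunk above simply brackets the slave-side counter synchronisation with
local_irq_save()/local_irq_restore(), so no interrupt can land between reading
the count and programming the compare register. The general shape of that
bracket, as an illustrative stand-alone sketch (the function name, body and
delta value are placeholders):

#include <linux/irqflags.h>
#include <asm/mipsregs.h>

/* Illustrative only: the irqsave bracket added above, around a
 * hypothetical timing-critical sequence. */
static void timing_critical_example(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* mask local IRQs and remember the prior state */

	/* Work that must not be interrupted, e.g. reading the c0 count and
	 * programming c0 compare back-to-back, as the sync code does. */
	write_c0_compare(read_c0_count() + 100);

	local_irq_restore(flags);	/* put the IRQ state back as it was */
}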

@@ -18,7 +18,7 @@ quiet_cmd_syshdr = SYSHDR $@
'$(syshdr_pfx_$(basetarget))' \
'$(syshdr_offset_$(basetarget))'
quiet_cmd_sysnr = SYSNR $@
quiet_cmd_sysnr = SYSNR $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(sysnr_abis_$(basetarget))' \
'$(sysnr_pfx_$(basetarget))' \

@@ -131,7 +131,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -152,7 +152,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
@@ -187,7 +187,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
do { \
@@ -213,7 +213,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -256,7 +256,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
@@ -340,7 +340,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
@@ -366,7 +366,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -407,7 +407,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -483,7 +483,7 @@ do { \
: "memory"); \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#else /* __BIG_ENDIAN */
@@ -509,7 +509,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -530,7 +530,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
@@ -565,7 +565,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
@@ -592,7 +592,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -635,7 +635,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
@@ -718,7 +718,7 @@ do { \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
do { \
@@ -743,7 +743,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -784,7 +784,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do { \
@@ -861,7 +861,7 @@ do { \
: "memory"); \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#endif
#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)