Merge tag 'mips_4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Paul Burton:

 - kexec support for the generic MIPS platform when running on a CPU
   including the MIPS Coherence Manager & related hardware.

 - Improvements to the definition of memory barriers used around MMIO
   accesses, and fixes in their use.

 - Switch to CONFIG_NO_BOOTMEM from Mike Rapoport, finally dropping
   reliance on the old bootmem code.

 - A number of fixes & improvements for Loongson 3 systems.

 - DT & config updates for the Microsemi Ocelot platform.

 - Workaround to enable USB power on the Netgear WNDR3400v3.

 - Various cleanups & fixes.

* tag 'mips_4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (51 commits)
  MIPS: Cleanup DSP ASE detection
  MIPS: dts: Change upper case to lower case
  MIPS: generic: Add Network, SPI and I2C to ocelot_defconfig
  MIPS: Loongson-3: Fix BRIDGE irq delivery problem
  MIPS: Loongson-3: Fix CPU UART irq delivery problem
  MIPS: Remove unused PREF, PREFE & PREFX macros
  MIPS: lib: Use kernel_pref & user_pref in memcpy()
  MIPS: Remove unused CAT macro
  MIPS: Add kernel_pref & user_pref helpers
  MIPS: Remove unused TTABLE macro
  MIPS: Remove unused PIC macros
  MIPS: Remove unused MOVN & MOVZ macros
  MIPS: Provide actually relaxed MMIO accessors
  MIPS: Enforce strong ordering for MMIO accessors
  MIPS: Correct `mmiowb' barrier for `wbflush' platforms
  MIPS: Define MMIO ordering barriers
  MIPS: mscc: add PCB120 to the ocelot fitImage
  MIPS: mscc: add DT for Ocelot PCB120
  MIPS: memset: Limit excessive `noreorder' assembly mode use
  MIPS: memset: Fix CPU_DADDI_WORKAROUNDS `small_fixup' regression
  ...
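Two of the series above ("MIPS: Provide actually relaxed MMIO accessors" and "MIPS: Enforce strong ordering for MMIO accessors") split MMIO accesses into strongly-ordered and relaxed variants. A minimal driver-style sketch of the distinction, using the standard readl()/readl_relaxed() accessors (the device and register offsets here are hypothetical, not from this diff):

    #include <linux/io.h>

    /* Hypothetical register layout, for illustration only. */
    #define DEV_STATUS	0x00	/* bit 0: data ready */
    #define DEV_DATA	0x04

    static u32 dev_read_data(void __iomem *base)
    {
    	/*
    	 * readl_relaxed() skips the MMIO ordering barriers, which is
    	 * fine for polling a status register in a tight loop ...
    	 */
    	while (!(readl_relaxed(base + DEV_STATUS) & 0x1))
    		cpu_relax();

    	/*
    	 * ... while readl() is fully ordered, so the data read cannot
    	 * be reordered ahead of the status check or of other accesses.
    	 */
    	return readl(base + DEV_DATA);
    }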
@@ -113,22 +113,4 @@ obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
 obj-$(CONFIG_CPU_PM) += pm.o
 obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
-
-#
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
-# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
-# here because the compiler may use DSP ASE instructions (such as lwx) in
-# code paths where we cannot check that the CPU we are running on supports it.
-# Proper abstraction using HAVE_AS_DSP and macros is done in
-# arch/mips/include/asm/mipsregs.h.
-#
-ifeq ($(CONFIG_CPU_MIPSR2), y)
-CFLAGS_DSP = -DHAVE_AS_DSP
-
-CFLAGS_signal.o = $(CFLAGS_DSP)
-CFLAGS_signal32.o = $(CFLAGS_DSP)
-CFLAGS_process.o = $(CFLAGS_DSP)
-CFLAGS_branch.o = $(CFLAGS_DSP)
-CFLAGS_ptrace.o = $(CFLAGS_DSP)
-endif
-
 CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
@@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs)
 	if (!cpu_online(cpu))
 		return;
 
+	/* We won't be sent IPIs any more. */
+	set_cpu_online(cpu, false);
+
 	local_irq_disable();
 	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
@@ -43,7 +46,9 @@ static void crash_shutdown_secondary(void *passed_regs)
 
 	while (!atomic_read(&kexec_ready_to_reboot))
 		cpu_relax();
-	relocated_kexec_smp_wait(NULL);
+
+	kexec_reboot();
+
 	/* NOTREACHED */
 }
 
@@ -77,7 +77,7 @@ EXPORT(_stext)
 	 */
 FEXPORT(__kernel_entry)
 	j	kernel_entry
-#endif
+#endif /* CONFIG_BOOT_RAW */
 
 	__REF
 
@@ -94,24 +94,26 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 0:
 
 #ifdef CONFIG_USE_OF
-#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
+    defined(CONFIG_MIPS_ELF_APPENDED_DTB)
+
 	PTR_LA		t2, __appended_dtb
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	li		t1, 0xd00dfeed
-#else
+#else /* !CONFIG_CPU_BIG_ENDIAN */
 	li		t1, 0xedfe0dd0
-#endif
+#endif /* !CONFIG_CPU_BIG_ENDIAN */
 	lw		t0, (t2)
 	beq		t0, t1, dtb_found
-#endif
+#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
 	li		t1, -2
 	move		t2, a1
 	beq		a0, t1, dtb_found
 
 	li		t2, 0
 dtb_found:
-#endif
+#endif /* CONFIG_USE_OF */
 	PTR_LA		t0, __bss_start		# clear .bss
 	LONG_S		zero, (t0)
 	PTR_LA		t1, __bss_stop - LONGSIZE
@@ -156,9 +158,9 @@ dtb_found:
 	 * newly sync'd icache.
 	 */
 	jr.hb	v0
-#else
+#else /* !CONFIG_RELOCATABLE */
 	j	start_kernel
-#endif
+#endif /* !CONFIG_RELOCATABLE */
 	END(kernel_entry)
 
 #ifdef CONFIG_SMP
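The kernel_entry hunk above probes for an appended device tree by comparing the first word at __appended_dtb against the FDT magic 0xd00dfeed, byte-swapped to 0xedfe0dd0 when the CPU is little-endian. The same check in C, as a hedged sketch (function and argument names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirror of the head.S check: read the first word of the blob raw
     * and compare it against the FDT magic as this CPU sees it. */
    static bool is_appended_dtb(const void *blob)
    {
    	uint32_t first_word = *(const uint32_t *)blob;

    #ifdef __MIPSEB__
    	return first_word == 0xd00dfeed;	/* big-endian: magic as stored */
    #else
    	return first_word == 0xedfe0dd0;	/* little-endian: byte-swapped */
    #endif
    }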
@@ -9,6 +9,7 @@
 #include <linux/kexec.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
+#include <linux/libfdt.h>
 
 #include <asm/cacheflush.h>
 #include <asm/page.h>
@@ -19,15 +20,18 @@ extern const size_t relocate_new_kernel_size;
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
 
-int (*_machine_kexec_prepare)(struct kimage *) = NULL;
-void (*_machine_kexec_shutdown)(void) = NULL;
-void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+static unsigned long reboot_code_buffer;
+
 #ifdef CONFIG_SMP
-void (*relocated_kexec_smp_wait) (void *);
+static void (*relocated_kexec_smp_wait)(void *);
+
 atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
 void (*_crash_smp_send_stop)(void) = NULL;
 #endif
 
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+
 static void kexec_image_info(const struct kimage *kimage)
 {
 	unsigned long i;
@@ -48,13 +52,59 @@ static void kexec_image_info(const struct kimage *kimage)
 	}
 }
 
+#ifdef CONFIG_UHI_BOOT
+
+static int uhi_machine_kexec_prepare(struct kimage *kimage)
+{
+	int i;
+
+	/*
+	 * In case DTB file is not passed to the new kernel, a flat device
+	 * tree will be created by kexec tool. It holds modified command
+	 * line for the new kernel.
+	 */
+	for (i = 0; i < kimage->nr_segments; i++) {
+		struct fdt_header fdt;
+
+		if (kimage->segment[i].memsz <= sizeof(fdt))
+			continue;
+
+		if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
+			continue;
+
+		if (fdt_check_header(&fdt))
+			continue;
+
+		kexec_args[0] = -2;
+		kexec_args[1] = (unsigned long)
+			phys_to_virt((unsigned long)kimage->segment[i].mem);
+		break;
+	}
+
+	return 0;
+}
+
+int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
+
+#else
+
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+
+#endif /* CONFIG_UHI_BOOT */
+
 int
 machine_kexec_prepare(struct kimage *kimage)
 {
+#ifdef CONFIG_SMP
+	if (!kexec_nonboot_cpu_func())
+		return -EINVAL;
+#endif
+
 	kexec_image_info(kimage);
 
 	if (_machine_kexec_prepare)
 		return _machine_kexec_prepare(kimage);
+
 	return 0;
 }
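uhi_machine_kexec_prepare() above implements the handover side of the MIPS UHI boot protocol: kexec_args[0] (register a0 at entry) is set to -2 and kexec_args[1] (a1) to the virtual address of the device tree, which is exactly the `li t1, -2` / `beq a0, t1, dtb_found` case in the head.S hunk earlier. A sketch of the receiving end of that convention (names are illustrative, not kernel API):

    #include <stddef.h>
    #include <stdint.h>

    #define UHI_A0_DTB	(-2L)	/* UHI: a0 == -2 means a1 holds a DTB pointer */

    /* Illustrative early-boot helper: recover the DTB handed over per UHI. */
    static void *uhi_get_dtb(long a0, unsigned long a1)
    {
    	if (a0 == UHI_A0_DTB)
    		return (void *)a1;	/* DTB passed by kexec tool or bootloader */

    	return NULL;			/* fall back to a built-in or appended DTB */
    }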
@@ -63,11 +113,41 @@ machine_kexec_cleanup(struct kimage *kimage)
 {
 }
 
+#ifdef CONFIG_SMP
+static void kexec_shutdown_secondary(void *param)
+{
+	int cpu = smp_processor_id();
+
+	if (!cpu_online(cpu))
+		return;
+
+	/* We won't be sent IPIs any more. */
+	set_cpu_online(cpu, false);
+
+	local_irq_disable();
+	while (!atomic_read(&kexec_ready_to_reboot))
+		cpu_relax();
+
+	kexec_reboot();
+
+	/* NOTREACHED */
+}
+#endif
+
 void
 machine_shutdown(void)
 {
 	if (_machine_kexec_shutdown)
 		_machine_kexec_shutdown();
+
+#ifdef CONFIG_SMP
+	smp_call_function(kexec_shutdown_secondary, NULL, 0);
+
+	while (num_online_cpus() > 1) {
+		cpu_relax();
+		mdelay(1);
+	}
+#endif
 }
 
 void
@@ -79,12 +159,57 @@ machine_crash_shutdown(struct pt_regs *regs)
 		default_machine_crash_shutdown(regs);
 }
 
-typedef void (*noretfun_t)(void) __noreturn;
+#ifdef CONFIG_SMP
+void kexec_nonboot_cpu_jump(void)
+{
+	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
+				 reboot_code_buffer + relocate_new_kernel_size);
+
+	relocated_kexec_smp_wait(NULL);
+}
+#endif
+
+void kexec_reboot(void)
+{
+	void (*do_kexec)(void) __noreturn;
+
+	/*
+	 * We know we were online, and there will be no incoming IPIs at
+	 * this point. Mark online again before rebooting so that the crash
+	 * analysis tool will see us correctly.
+	 */
+	set_cpu_online(smp_processor_id(), true);
+
+	/* Ensure remote CPUs observe that we're online before rebooting. */
+	smp_mb__after_atomic();
+
+#ifdef CONFIG_SMP
+	if (smp_processor_id() > 0) {
+		/*
+		 * Instead of cpu_relax() or wait, this is needed for kexec
+		 * smp reboot. Kdump usually doesn't require an smp new
+		 * kernel, but kexec may do.
+		 */
+		kexec_nonboot_cpu();
+
+		/* NOTREACHED */
+	}
+#endif
+
+	/*
+	 * Make sure we get correct instructions written by the
+	 * machine_kexec() CPU.
+	 */
+	local_flush_icache_range(reboot_code_buffer,
+				 reboot_code_buffer + relocate_new_kernel_size);
+
+	do_kexec = (void *)reboot_code_buffer;
+	do_kexec();
+}
 
 void
 machine_kexec(struct kimage *image)
 {
-	unsigned long reboot_code_buffer;
 	unsigned long entry;
 	unsigned long *ptr;
 
@@ -118,6 +243,9 @@ machine_kexec(struct kimage *image)
 		*ptr = (unsigned long) phys_to_virt(*ptr);
 	}
 
+	/* Mark offline BEFORE disabling local irq. */
+	set_cpu_online(smp_processor_id(), false);
+
 	/*
 	 * we do not want to be bothered.
 	 */
@@ -125,6 +253,7 @@ machine_kexec(struct kimage *image)
 
 	printk("Will call new kernel at %08lx\n", image->start);
 	printk("Bye ...\n");
+	/* Make reboot code buffer available to the boot CPU. */
 	__flush_cache_all();
 #ifdef CONFIG_SMP
 	/* All secondary cpus now may jump to kexec_wait cycle */
@@ -133,5 +262,5 @@ machine_kexec(struct kimage *image)
 	smp_wmb();
 	atomic_set(&kexec_ready_to_reboot, 1);
 #endif
-	((noretfun_t) reboot_code_buffer)();
+	kexec_reboot();
 }
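kexec_reboot() above dispatches non-boot CPUs through kexec_nonboot_cpu(), whose definition is not part of this hunk. Judging from the .kexec_nonboot_cpu members added to the plat_smp_ops structures in the smp-bmips.c and smp-cps.c hunks below, the dispatch plausibly looks like the following sketch (assumed, not shown in this diff):

    /* Sketch only: the real helpers live alongside the MIPS smp-ops code. */
    extern const struct plat_smp_ops *mp_ops;

    static inline void *kexec_nonboot_cpu_func(void)
    {
    	/* Non-NULL only when the platform supplies a kexec CPU hook,
    	 * which machine_kexec_prepare() checks before allowing kexec. */
    	return (void *)mp_ops->kexec_nonboot_cpu;
    }

    static inline void kexec_nonboot_cpu(void)
    {
    	/* Either jump to the relocated kexec_wait loop or halt this CPU. */
    	mp_ops->kexec_nonboot_cpu();
    }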
@@ -154,40 +154,6 @@ static int __init config7_set(char *str)
 }
 __setup("config7=", config7_set);
 
-/* Experimental cache flush control parameters that should go away some day */
-int mt_protiflush;
-int mt_protdflush;
-int mt_n_iflushes = 1;
-int mt_n_dflushes = 1;
-
-static int __init set_protiflush(char *s)
-{
-	mt_protiflush = 1;
-	return 1;
-}
-__setup("protiflush", set_protiflush);
-
-static int __init set_protdflush(char *s)
-{
-	mt_protdflush = 1;
-	return 1;
-}
-__setup("protdflush", set_protdflush);
-
-static int __init niflush(char *s)
-{
-	get_option(&s, &mt_n_iflushes);
-	return 1;
-}
-__setup("niflush=", niflush);
-
-static int __init ndflush(char *s)
-{
-	get_option(&s, &mt_n_dflushes);
-	return 1;
-}
-__setup("ndflush=", ndflush);
-
 static unsigned int itc_base;
 
 static int __init set_itc_base(char *str)
@@ -232,16 +198,6 @@ void mips_mt_set_cpuoptions(void)
 		printk("Config7: 0x%08x\n", read_c0_config7());
 	}
 
-	/* Report Cache management debug options */
-	if (mt_protiflush)
-		printk("I-cache flushes single-threaded\n");
-	if (mt_protdflush)
-		printk("D-cache flushes single-threaded\n");
-	if (mt_n_iflushes != 1)
-		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
-	if (mt_n_dflushes != 1)
-		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
-
 	if (itc_base != 0) {
 		/*
 		 * Configure ITC mapping. This code is very
@@ -283,21 +239,6 @@ void mips_mt_set_cpuoptions(void)
 	}
 }
 
-/*
- * Function to protect cache flushes from concurrent execution
- * depends on MP software model chosen.
- */
-
-void mt_cflush_lockdown(void)
-{
-	/* FILL IN VSMP and AP/SP VERSIONS HERE */
-}
-
-void mt_cflush_release(void)
-{
-	/* FILL IN VSMP and AP/SP VERSIONS HERE */
-}
-
 struct class *mt_class;
 
 static int __init mt_init(void)
@@ -146,7 +146,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
 			break;
 
 		type = (*r >> 24) & 0xff;
-		loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2));
+		loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
 		loc_new = RELOCATED(loc_orig);
 
 		if (reloc_handlers_rel[type] == NULL) {
@@ -333,7 +333,7 @@ static void __init finalize_initrd(void)
 
 	maybe_bswap_initrd();
 
-	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
+	memblock_reserve(__pa(initrd_start), size);
 	initrd_below_start_ok = 1;
 
 	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -370,20 +370,10 @@ static void __init bootmem_init(void)
 
 #else /* !CONFIG_SGI_IP27 */
 
-static unsigned long __init bootmap_bytes(unsigned long pages)
-{
-	unsigned long bytes = DIV_ROUND_UP(pages, 8);
-
-	return ALIGN(bytes, sizeof(long));
-}
-
 static void __init bootmem_init(void)
 {
 	unsigned long reserved_end;
-	unsigned long mapstart = ~0UL;
-	unsigned long bootmap_size;
 	phys_addr_t ramstart = PHYS_ADDR_MAX;
-	bool bootmap_valid = false;
 	int i;
 
 	/*
@@ -395,6 +385,8 @@ static void __init bootmem_init(void)
 	init_initrd();
 	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
 
+	memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+
 	/*
 	 * max_low_pfn is not a number of pages. The number of pages
 	 * of the system is given by 'max_low_pfn - min_low_pfn'.
@@ -442,9 +434,6 @@ static void __init bootmem_init(void)
 		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
 			continue;
 #endif
-		if (start >= mapstart)
-			continue;
-		mapstart = max(reserved_end, start);
 	}
 
 	if (min_low_pfn >= max_low_pfn)
@@ -456,9 +445,11 @@ static void __init bootmem_init(void)
 	/*
 	 * Reserve any memory between the start of RAM and PHYS_OFFSET
 	 */
-	if (ramstart > PHYS_OFFSET)
+	if (ramstart > PHYS_OFFSET) {
 		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
 				  BOOT_MEM_RESERVED);
+		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
+	}
 
 	if (min_low_pfn > ARCH_PFN_OFFSET) {
 		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
@@ -483,52 +474,6 @@ static void __init bootmem_init(void)
 		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 	}
 
-#ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * mapstart should be after initrd_end
-	 */
-	if (initrd_end)
-		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
-
-	/*
-	 * check that mapstart doesn't overlap with any of
-	 * memory regions that have been reserved through eg. DTB
-	 */
-	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
-
-	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
-						bootmap_size);
-	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
-		unsigned long mapstart_addr;
-
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RESERVED:
-			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
-						boot_mem_map.map[i].size);
-			if (PHYS_PFN(mapstart_addr) < mapstart)
-				break;
-
-			bootmap_valid = memory_region_available(mapstart_addr,
-								bootmap_size);
-			if (bootmap_valid)
-				mapstart = PHYS_PFN(mapstart_addr);
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (!bootmap_valid)
-		panic("No memory area to place a bootmap bitmap");
-
-	/*
-	 * Initialize the boot-time allocator with low memory only.
-	 */
-	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
-					      min_low_pfn, max_low_pfn))
-		panic("Unexpected memory size required for bootmap");
-
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		unsigned long start, end;
 
@@ -577,9 +522,9 @@ static void __init bootmem_init(void)
 		default:
 			/* Not usable memory */
 			if (start > min_low_pfn && end < max_low_pfn)
-				reserve_bootmem(boot_mem_map.map[i].addr,
-						boot_mem_map.map[i].size,
-						BOOTMEM_DEFAULT);
+				memblock_reserve(boot_mem_map.map[i].addr,
+						 boot_mem_map.map[i].size);
 
 			continue;
 		}
@@ -602,15 +547,9 @@ static void __init bootmem_init(void)
 		size = end - start;
 
 		/* Register lowmem ranges */
-		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
 		memory_present(0, start, end);
 	}
 
-	/*
-	 * Reserve the bootmap memory.
-	 */
-	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
-
 #ifdef CONFIG_RELOCATABLE
 	/*
 	 * The kernel reserves all memory below its _end symbol as bootmem,
@@ -642,29 +581,6 @@ static void __init bootmem_init(void)
 
 #endif /* CONFIG_SGI_IP27 */
 
-/*
- * arch_mem_init - initialize memory management subsystem
- *
- *  o plat_mem_setup() detects the memory configuration and will record detected
- *    memory areas using add_memory_region.
- *
- * At this stage the memory configuration of the system is known to the
- * kernel but generic memory management system is still entirely uninitialized.
- *
- *  o bootmem_init()
- *  o sparse_init()
- *  o paging_init()
- *  o dma_contiguous_reserve()
- *
- * At this stage the bootmem allocator is ready to use.
- *
- * NOTE: historically plat_mem_setup did the entire platform initialization.
- *	 This was rather impractical because it meant plat_mem_setup had to
- *	 get away without any kind of memory allocator. To keep old code from
- *	 breaking plat_setup was just renamed to plat_mem_setup and a second platform
- *	 initialization hook for anything else was introduced.
- */
-
 static int usermem __initdata;
 
 static int __init early_parse_mem(char *p)
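The bootmem_init() rewrite above is the heart of the CONFIG_NO_BOOTMEM switch named in the merge message: the bootmap bitmap machinery (bootmap_bytes(), init_bootmem_node(), free_bootmem(), reserve_bootmem()) disappears in favour of plain memblock calls. A minimal sketch of the replacement pattern, using memblock_reserve() and the memblock_virt_alloc() allocator that the diff's own comment names (the function below is illustrative, not from the diff):

    #include <linux/bootmem.h>
    #include <linux/memblock.h>

    /* Illustrative only: reserve a range, then allocate early memory,
     * as done under memblock instead of the bootmem bitmap allocator. */
    static void * __init example_early_buf(phys_addr_t base, phys_addr_t size)
    {
    	/* Keep a firmware/kernel range out of the allocator's hands. */
    	memblock_reserve(base, size);

    	/*
    	 * Early allocations come straight from memblock (this call panics
    	 * rather than returning NULL). Per the comment added to
    	 * arch_mem_init() below, it is only valid once max_low_pfn is
    	 * known and memblock_set_current_limit() has been called.
    	 */
    	return memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
    }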
@@ -841,6 +757,28 @@ static void __init request_crashkernel(struct resource *res)
 #define BUILTIN_EXTEND_WITH_PROM \
 	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
 
+/*
+ * arch_mem_init - initialize memory management subsystem
+ *
+ *  o plat_mem_setup() detects the memory configuration and will record detected
+ *    memory areas using add_memory_region.
+ *
+ * At this stage the memory configuration of the system is known to the
+ * kernel but generic memory management system is still entirely uninitialized.
+ *
+ *  o bootmem_init()
+ *  o sparse_init()
+ *  o paging_init()
+ *  o dma_contiguous_reserve()
+ *
+ * At this stage the bootmem allocator is ready to use.
+ *
+ * NOTE: historically plat_mem_setup did the entire platform initialization.
+ *	 This was rather impractical because it meant plat_mem_setup had to
+ *	 get away without any kind of memory allocator. To keep old code from
+ *	 breaking plat_setup was just renamed to plat_mem_setup and a second platform
+ *	 initialization hook for anything else was introduced.
+ */
 static void __init arch_mem_init(char **cmdline_p)
 {
 	struct memblock_region *reg;
@@ -916,21 +854,29 @@ static void __init arch_mem_init(char **cmdline_p)
 	early_init_fdt_scan_reserved_mem();
 
 	bootmem_init();
 
+	/*
+	 * Prevent memblock from allocating high memory.
+	 * This cannot be done before max_low_pfn is detected, so up
+	 * to this point is possible to only reserve physical memory
+	 * with memblock_reserve; memblock_virt_alloc* can be used
+	 * only after this point
+	 */
+	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
 #ifdef CONFIG_PROC_VMCORE
 	if (setup_elfcorehdr && setup_elfcorehdr_size) {
 		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
 		       setup_elfcorehdr, setup_elfcorehdr_size);
-		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
-				BOOTMEM_DEFAULT);
+		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
 	}
 #endif
 
 	mips_parse_crashkernel();
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
-		reserve_bootmem(crashk_res.start,
-				crashk_res.end - crashk_res.start + 1,
-				BOOTMEM_DEFAULT);
+		memblock_reserve(crashk_res.start,
+				 crashk_res.end - crashk_res.start + 1);
 #endif
 	device_tree_init();
 	sparse_init();
@@ -940,7 +886,7 @@ static void __init arch_mem_init(char **cmdline_p)
 	/* Tell bootmem about cma reserved memblock section */
 	for_each_memblock(reserved, reg)
 		if (reg->size != 0)
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+			memblock_reserve(reg->base, reg->size);
 
 	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
 			       __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
@@ -25,6 +25,7 @@
 #include <linux/linkage.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/kexec.h>
 
 #include <asm/time.h>
 #include <asm/pgtable.h>
@@ -423,6 +424,9 @@ const struct plat_smp_ops bmips43xx_smp_ops = {
 	.cpu_disable		= bmips_cpu_disable,
 	.cpu_die		= bmips_cpu_die,
 #endif
+#ifdef CONFIG_KEXEC
+	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
+#endif
 };
 
 const struct plat_smp_ops bmips5000_smp_ops = {
@@ -437,6 +441,9 @@ const struct plat_smp_ops bmips5000_smp_ops = {
 	.cpu_disable		= bmips_cpu_disable,
 	.cpu_die		= bmips_cpu_die,
 #endif
+#ifdef CONFIG_KEXEC
+	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
+#endif
 };
 
 #endif /* CONFIG_SMP */
@@ -398,6 +398,55 @@ static void cps_smp_finish(void)
 	local_irq_enable();
 }
 
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+
+enum cpu_death {
+	CPU_DEATH_HALT,
+	CPU_DEATH_POWER,
+};
+
+static void cps_shutdown_this_cpu(enum cpu_death death)
+{
+	unsigned int cpu, core, vpe_id;
+
+	cpu = smp_processor_id();
+	core = cpu_core(&cpu_data[cpu]);
+
+	if (death == CPU_DEATH_HALT) {
+		vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+		pr_debug("Halting core %d VP%d\n", core, vpe_id);
+		if (cpu_has_mipsmt) {
+			/* Halt this TC */
+			write_c0_tchalt(TCHALT_H);
+			instruction_hazard();
+		} else if (cpu_has_vp) {
+			write_cpc_cl_vp_stop(1 << vpe_id);
+
+			/* Ensure that the VP_STOP register is written */
+			wmb();
+		}
+	} else {
+		pr_debug("Gating power to core %d\n", core);
+		/* Power down the core */
+		cps_pm_enter_state(CPS_PM_POWER_GATED);
+	}
+}
+
+#ifdef CONFIG_KEXEC
+
+static void cps_kexec_nonboot_cpu(void)
+{
+	if (cpu_has_mipsmt || cpu_has_vp)
+		cps_shutdown_this_cpu(CPU_DEATH_HALT);
+	else
+		cps_shutdown_this_cpu(CPU_DEATH_POWER);
+}
+
+#endif /* CONFIG_KEXEC */
+
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 static int cps_cpu_disable(void)
@@ -421,19 +470,15 @@ static int cps_cpu_disable(void)
 }
 
 static unsigned cpu_death_sibling;
-static enum {
-	CPU_DEATH_HALT,
-	CPU_DEATH_POWER,
-} cpu_death;
+static enum cpu_death cpu_death;
 
 void play_dead(void)
 {
-	unsigned int cpu, core, vpe_id;
+	unsigned int cpu;
 
 	local_irq_disable();
 	idle_task_exit();
 	cpu = smp_processor_id();
-	core = cpu_core(&cpu_data[cpu]);
 	cpu_death = CPU_DEATH_POWER;
 
 	pr_debug("CPU%d going offline\n", cpu);
@@ -456,25 +501,7 @@ void play_dead(void)
 	/* This CPU has chosen its way out */
 	(void)cpu_report_death();
 
-	if (cpu_death == CPU_DEATH_HALT) {
-		vpe_id = cpu_vpe_id(&cpu_data[cpu]);
-
-		pr_debug("Halting core %d VP%d\n", core, vpe_id);
-		if (cpu_has_mipsmt) {
-			/* Halt this TC */
-			write_c0_tchalt(TCHALT_H);
-			instruction_hazard();
-		} else if (cpu_has_vp) {
-			write_cpc_cl_vp_stop(1 << vpe_id);
-
-			/* Ensure that the VP_STOP register is written */
-			wmb();
-		}
-	} else {
-		pr_debug("Gating power to core %d\n", core);
-		/* Power down the core */
-		cps_pm_enter_state(CPS_PM_POWER_GATED);
-	}
+	cps_shutdown_this_cpu(cpu_death);
 
 	/* This should never be reached */
 	panic("Failed to offline CPU %u", cpu);
@@ -593,6 +620,9 @@ static const struct plat_smp_ops cps_smp_ops = {
 	.cpu_disable		= cps_cpu_disable,
 	.cpu_die		= cps_cpu_die,
 #endif
+#ifdef CONFIG_KEXEC
+	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
+#endif
 };
 
 bool mips_cps_smp_in_use(void)
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/kallsyms.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/interrupt.h>
 #include <linux/ptrace.h>
 #include <linux/kgdb.h>
@@ -348,7 +349,7 @@ static void __show_regs(const struct pt_regs *regs)
  */
 void show_regs(struct pt_regs *regs)
 {
-	__show_regs((struct pt_regs *)regs);
+	__show_regs(regs);
 	dump_stack();
 }
 
@@ -2260,8 +2261,10 @@ void __init trap_init(void)
 		unsigned long size = 0x200 + VECTORSPACING*64;
 		phys_addr_t ebase_pa;
 
+		memblock_set_bottom_up(true);
 		ebase = (unsigned long)
 			__alloc_bootmem(size, 1 << fls(size), 0);
+		memblock_set_bottom_up(false);
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.
@@ -130,7 +130,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
 #define _LoadW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -151,8 +151,8 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#else
-/* MIPSR6 has no lwl instruction */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl instruction */
 #define _LoadW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -186,7 +186,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 
 #define _LoadHWU(addr, value, res, type) \
 do { \
@@ -212,7 +212,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
 #define _LoadWU(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -255,8 +255,8 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#else
-/* MIPSR6 has not lwl and ldl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl and ldl instructions */
 #define _LoadWU(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -339,7 +339,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 
 
 #define _StoreHW(addr, value, res, type) \
@@ -365,7 +365,7 @@ do { \
 		: "r" (value), "r" (addr), "i" (-EFAULT));\
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
 #define _StoreW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -406,8 +406,7 @@ do { \
 		: "r" (value), "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#else
-/* MIPSR6 has no swl and sdl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 #define _StoreW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -483,7 +482,7 @@ do { \
 		: "memory"); \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 
 #else /* __BIG_ENDIAN */
 
@@ -509,7 +508,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
 #define _LoadW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -530,8 +529,8 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#else
-/* MIPSR6 has no lwl instruction */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl instruction */
 #define _LoadW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -565,7 +564,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 
 
 #define _LoadHWU(addr, value, res, type) \
@@ -592,7 +591,7 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
 #define _LoadWU(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -635,8 +634,8 @@ do { \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#else
-/* MIPSR6 has not lwl and ldl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl and ldl instructions */
 #define _LoadWU(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -718,7 +717,7 @@ do { \
 		: "=&r" (value), "=r" (res) \
 		: "r" (addr), "i" (-EFAULT)); \
 } while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 
 #define _StoreHW(addr, value, res, type) \
 do { \
@@ -743,7 +742,7 @@ do { \
 		: "r" (value), "r" (addr), "i" (-EFAULT));\
 } while(0)
 
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
 #define _StoreW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -784,8 +783,8 @@ do { \
 		: "r" (value), "r" (addr), "i" (-EFAULT)); \
 } while(0)
 
-#else
-/* MIPSR6 has no swl and sdl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without swl and sdl instructions */
 #define _StoreW(addr, value, res, type) \
 do { \
 		__asm__ __volatile__ ( \
@@ -861,7 +860,7 @@ do { \
 		: "memory"); \
 } while(0)
 
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
 #endif
 
 #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
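The renamed guard reflects what the fallback paths actually are: classic MIPS performs an unaligned 32-bit load with the lwl/lwr (load word left/right) pair, while MIPSR6 dropped those instructions, so kernels built without CONFIG_CPU_HAS_LOAD_STORE_LR assemble the value from individual byte loads instead. The byte-assembly idea in portable C, as a sketch (the kernel's macros do this in inline assembly, with exception-table fixups the sketch omits):

    #include <stdint.h>

    /* Little-endian equivalent of the "For CPUs without lwl instruction"
     * path: build the word from four byte loads, which are always aligned. */
    static uint32_t load_w_unaligned(const uint8_t *p)
    {
    	return (uint32_t)p[0] |
    	       ((uint32_t)p[1] << 8) |
    	       ((uint32_t)p[2] << 16) |
    	       ((uint32_t)p[3] << 24);
    }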