Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull tile updates from Chris Metcalf:
 "These changes cover a range of new arch/tile features and
  optimizations.  They've been through LKML review and on linux-next
  for a month or so.  There's also one bug-fix that just missed 3.4,
  which I've marked for stable."

Fixed up trivial conflict in arch/tile/Kconfig (new added tile Kconfig
entries clashing with the generic timer/clockevents changes).

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: default to tilegx_defconfig for ARCH=tile
  tile: fix bug where fls(0) was not returning 0
  arch/tile: mark TILEGX as not EXPERIMENTAL
  tile/mm/fault.c: Port OOM changes to handle_page_fault
  arch/tile: add descriptive text if the kernel reports a bad trap
  arch/tile: allow querying cpu module information from the hypervisor
  arch/tile: fix hardwall for tilegx and generalize for idn and ipi
  arch/tile: support multiple huge page sizes dynamically
  mm: add new arch_make_huge_pte() method for tile support
  arch/tile: support kexec() for tilegx
  arch/tile: support <asm/cachectl.h> header for cacheflush() syscall
  arch/tile: Allow tilegx to build with either 16K or 64K page size
  arch/tile: optimize get_user/put_user and friends
  arch/tile: support building big-endian kernel
  arch/tile: allow building Linux with transparent huge pages enabled
  arch/tile: use interrupt critical sections less
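A note on the fls() fix in the list above: by kernel convention, fls() ("find last set") returns the 1-based index of the most significant set bit, and 0 when no bit is set; the tile implementation had been mishandling the zero case. A minimal reference implementation of the expected semantics (generic C for illustration, not the optimized tile code):

    /* Reference semantics: fls_ref(0) == 0, fls_ref(1) == 1,
     * fls_ref(0x80000000u) == 32. */
    static int fls_ref(unsigned int x)
    {
            int r = 0;
            while (x) {
                    x >>= 1;        /* drop one bit position per iteration */
                    r++;
            }
            return r;
    }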
arch/tile/kernel/Makefile
@@ -9,10 +9,9 @@ obj-y := backtrace.o entry.o irq.o messaging.o \
 	intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
 
 obj-$(CONFIG_HARDWALL)		+= hardwall.o
-obj-$(CONFIG_TILEGX)		+= futex_64.o
 obj-$(CONFIG_COMPAT)		+= compat.o compat_signal.o
 obj-$(CONFIG_SMP)		+= smpboot.o smp.o tlb.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel_$(BITS).o
 obj-$(CONFIG_PCI)		+= pci.o
arch/tile/kernel/entry.S
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
 	 */
 STD_ENTRY(_cpu_idle)
 	movei r1, 1
+	IRQ_ENABLE_LOAD(r2, r3)
 	mtspr INTERRUPT_CRITICAL_SECTION, r1
-	IRQ_ENABLE(r2, r3)		/* unmask, but still with ICS set */
+	IRQ_ENABLE_APPLY(r2, r3)	/* unmask, but still with ICS set */
 	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
_cpu_idle_nap:
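The _cpu_idle change above belongs to "use interrupt critical sections less": IRQ_ENABLE is split into a LOAD half (memory reads of the interrupt-mask words) and an APPLY half (SPR writes), so that no memory loads execute while the interrupt-critical-section (ICS) bit is set. A C-like sketch of the resulting ordering, with illustrative stub names (the real code is tile assembly):

    /* Illustrative stubs only; the real operations are SPR writes. */
    static void irq_enable_load_sketch(unsigned long *lo, unsigned long *hi)
    { *lo = 0; *hi = 0; }                     /* stands in for the memory loads */
    static void irq_enable_apply_sketch(unsigned long lo, unsigned long hi)
    { (void)lo; (void)hi; }                   /* stands in for the SPR writes */
    static void set_ics_sketch(int on) { (void)on; }  /* INTERRUPT_CRITICAL_SECTION */

    static void cpu_idle_enable_irqs_sketch(void)
    {
            unsigned long lo, hi;
            irq_enable_load_sketch(&lo, &hi); /* loads happen before ICS is set */
            set_ics_sketch(1);                /* enter the critical section */
            irq_enable_apply_sketch(lo, hi);  /* unmask, but still with ICS set */
            set_ics_sketch(0);                /* leave the critical section */
    }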
(Diff for one file suppressed because it is too large to display.)
arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
 	}
 	{
 	  moveli lr, lo16(1f)
-	  move r5, zero
+	  moveli r5, CTX_PAGE_FLAG
 	}
 	{
 	  auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
 
 .macro PTE va, cpa, bits1, no_org=0
 .ifeq \no_org
-.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+.org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
 .endif
 .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
       (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+.word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
 .endm
 
 __PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
 	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
 	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
 			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
-	.org swapper_pg_dir + HV_L1_SIZE
+	.org swapper_pg_dir + PGDIR_SIZE
 	END(swapper_pg_dir)
 
 	/*
arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
 	  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
 	}
 	{
-	  move r3, zero
+	  moveli r3, CTX_PAGE_FLAG
 	  j hv_install_context
 	}
1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
 	.macro PTE cpa, bits1
 	.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
 	      HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
-	      (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+	      (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
 	.endm
 
 	__PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
-	.org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
 	.quad 0		/* PTE temp_data_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
 	.quad 0		/* PTE temp_code_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_SIZE
+	.org swapper_pg_dir + SIZEOF_PGD
 	END(swapper_pg_dir)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_data_pmd + HV_L1_SIZE
+	.org temp_data_pmd + SIZEOF_PMD
 	END(temp_data_pmd)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_code_pmd + HV_L1_SIZE
+	.org temp_code_pmd + SIZEOF_PMD
 	END(temp_code_pmd)
 
 	/*
arch/tile/kernel/hvglue.lds
@@ -55,4 +55,5 @@ hv_store_mapping = TEXT_OFFSET + 0x106a0;
 hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
 hv_flush_all = TEXT_OFFSET + 0x106e0;
 hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
-hv_glue_internals = TEXT_OFFSET + 0x10720;
+hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
+hv_glue_internals = TEXT_OFFSET + 0x10740;
arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
 	 * This routine saves just the first four registers, plus the
 	 * stack context so we can do proper backtracing right away,
 	 * and defers to handle_interrupt to save the rest.
-	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+	 * and needs sp set to its final location at the bottom of
+	 * the stack frame.
 	 */
 	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
 	wh64    r0    /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
 	push_reg r5, r52
 	st r52, r4
 
-	/* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
-	{
-	mfspr r20, SPR_SYSTEM_SAVE_K_0
-	moveli r21, hw2_last(__per_cpu_offset)
-	}
-	{
-	shl16insli r21, r21, hw1(__per_cpu_offset)
-	bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
-	}
-	shl16insli r21, r21, hw0(__per_cpu_offset)
-	shl3add r20, r20, r21
-	ld tp, r20
-#else
-	move tp, zero
-#endif
-
 	/*
 	 * If we will be returning to the kernel, we will need to
 	 * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
 	.endif
 	st r21, r32
 
+	/*
+	 * we've captured enough state to the stack (including in
+	 * particular our EX_CONTEXT state) that we can now release
+	 * the interrupt critical section and replace it with our
+	 * standard "interrupts disabled" mask value.  This allows
+	 * synchronous interrupts (and profile interrupts) to punch
+	 * through from this point onwards.
+	 *
+	 * It's important that no code before this point touch memory
+	 * other than our own stack (to keep the invariant that this
+	 * is all that gets touched under ICS), and that no code after
+	 * this point reference any interrupt-specific SPR, in particular
+	 * the EX_CONTEXT_K_ values.
+	 */
+	.ifc \function,handle_nmi
+	IRQ_DISABLE_ALL(r20)
+	.else
+	IRQ_DISABLE(r20, r21)
+	.endif
+	mtspr   INTERRUPT_CRITICAL_SECTION, zero
+
+	/* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+	{
+	mfspr r20, SPR_SYSTEM_SAVE_K_0
+	moveli r21, hw2_last(__per_cpu_offset)
+	}
+	{
+	shl16insli r21, r21, hw1(__per_cpu_offset)
+	bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+	}
+	shl16insli r21, r21, hw0(__per_cpu_offset)
+	shl3add r20, r20, r21
+	ld tp, r20
+#else
+	move tp, zero
+#endif
+
 #ifdef __COLLECT_LINKER_FEEDBACK__
 	/*
 	 * Notify the feedback routines that we were in the
@@ -512,21 +535,6 @@ intvec_\vecname:
 	FEEDBACK_ENTER(\function)
 #endif
 
-	/*
-	 * we've captured enough state to the stack (including in
-	 * particular our EX_CONTEXT state) that we can now release
-	 * the interrupt critical section and replace it with our
-	 * standard "interrupts disabled" mask value.  This allows
-	 * synchronous interrupts (and profile interrupts) to punch
-	 * through from this point onwards.
-	 */
-	.ifc \function,handle_nmi
-	IRQ_DISABLE_ALL(r20)
-	.else
-	IRQ_DISABLE(r20, r21)
-	.endif
-	mtspr   INTERRUPT_CRITICAL_SECTION, zero
-
 	/*
 	 * Prepare the first 256 stack bytes to be rapidly accessible
 	 * without having to fetch the background data.
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
 	beqzt   r30, .Lrestore_regs
 	j       3f
2:	TRACE_IRQS_ON
+	IRQ_ENABLE_LOAD(r20, r21)
 	movei   r0, 1
 	mtspr   INTERRUPT_CRITICAL_SECTION, r0
-	IRQ_ENABLE(r20, r21)
+	IRQ_ENABLE_APPLY(r20, r21)
 	beqzt   r30, .Lrestore_regs
3:
 
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
 	 * that will save some cycles if this turns out to be a syscall.
 	 */
 	.Lrestore_regs:
-	FEEDBACK_REENTER(interrupt_return)   /* called from elsewhere */
 
 	/*
 	 * Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
 	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
 	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
 	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
-	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
 	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
 	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
 	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
arch/tile/kernel/machine_kexec.c
@@ -31,6 +31,8 @@
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/tlbflush.h>
 #include <asm/homecache.h>
 #include <hv/hypervisor.h>
 
 
@@ -222,11 +224,22 @@ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
 	return alloc_pages_node(0, gfp_mask, order);
 }
 
+/*
+ * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
+ * For tilepro, PAGE_OFFSET is used since this is the largest possible value
+ * for tilepro, while for tilegx, we limit it to the entire middle-level page
+ * table, which we assume has been allocated and is undoubtedly large enough.
+ */
+#ifndef __tilegx__
+#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
+#else
+#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
+#endif
+
 static void setup_quasi_va_is_pa(void)
 {
-	HV_PTE *pgtable;
 	HV_PTE pte;
-	int i;
+	unsigned long i;
 
 	/*
 	 * Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@ static void setup_quasi_va_is_pa(void)
 	 */
 	local_flush_tlb_all();
 
-	/* setup VA is PA, at least up to PAGE_OFFSET */
-
-	pgtable = (HV_PTE *)current->mm->pgd;
+	/*
+	 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
+	 * Note here we assume that level-1 page table is defined by
+	 * HPAGE_SIZE.
+	 */
 	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
 	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
 
-	for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+	for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
+		unsigned long vaddr = i << HPAGE_SHIFT;
+		pgd_t *pgd = pgd_offset(current->mm, vaddr);
+		pud_t *pud = pud_offset(pgd, vaddr);
+		pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
 		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
 
 		if (pfn_valid(pfn))
-			__set_pte(&pgtable[i], pfn_pte(pfn, pte));
+			__set_pte(ptep, pfn_pte(pfn, pte));
 	}
 }
@@ -251,6 +270,7 @@ static void setup_quasi_va_is_pa(void)
 void machine_kexec(struct kimage *image)
 {
 	void *reboot_code_buffer;
+	pte_t *ptep;
 	void (*rnk)(unsigned long, void *, unsigned long)
 		__noreturn;
 
@@ -266,8 +286,10 @@ void machine_kexec(struct kimage *image)
 	 */
 	homecache_change_page_home(image->control_code_page, 0,
 				   smp_processor_id());
-	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
-				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+	reboot_code_buffer = page_address(image->control_code_page);
 	BUG_ON(reboot_code_buffer == NULL);
+	ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+	__set_pte(ptep, pte_mkexec(*ptep));
 	memcpy(reboot_code_buffer, relocate_new_kernel,
 	       relocate_new_kernel_size);
 	__flush_icache_range(
arch/tile/kernel/module.c
@@ -159,7 +159,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 
 		switch (ELF_R_TYPE(rel[i].r_info)) {
 
-#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))
+#ifdef __LITTLE_ENDIAN
+# define MUNGE(func) \
+	(*location = ((*location & ~func(-1)) | func(value)))
+#else
+/*
+ * Instructions are always little-endian, so when we read them as data,
+ * we have to swap them around before and after modifying them.
+ */
+# define MUNGE(func) \
+	(*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
+#endif
 
 #ifndef __tilegx__
 		case R_TILE_32:
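The MUNGE change above supports "arch/tile: support building big-endian kernel": tile instruction bundles are always little-endian in memory, so a big-endian kernel must byte-swap a bundle before patching relocation bits into it and swap it back before storing. A minimal standalone sketch of that idea (illustrative names, not the kernel's macros):

    #include <stdint.h>

    /* Stand-in for the kernel's swab64(). */
    static inline uint64_t swab64_sketch(uint64_t x)
    {
            return __builtin_bswap64(x);
    }

    /* Patch `value` into `*location` under `mask`, honoring the fact that
     * the 64-bit bundle is stored little-endian regardless of host order. */
    static void munge_sketch(uint64_t *location, uint64_t mask,
                             uint64_t value, int big_endian_host)
    {
            if (!big_endian_host) {
                    *location = (*location & ~mask) | (value & mask);
            } else {
                    uint64_t bundle = swab64_sketch(*location); /* to CPU order */
                    bundle = (bundle & ~mask) | (value & mask);
                    *location = swab64_sketch(bundle);          /* back to LE */
            }
    }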
arch/tile/kernel/proc.c
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/sysctl.h>
 #include <linux/hardirq.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <asm/unaligned.h>
 #include <asm/pgtable.h>
arch/tile/kernel/process.c
@@ -128,10 +128,10 @@ void arch_release_thread_info(struct thread_info *info)
 	 * Calling deactivate here just frees up the data structures.
 	 * If the task we're freeing held the last reference to a
 	 * hardwall fd, it would have been released prior to this point
-	 * anyway via exit_files(), and "hardwall" would be NULL by now.
+	 * anyway via exit_files(), and the hardwall_task.info pointers
+	 * would be NULL by now.
 	 */
-	if (info->task->thread.hardwall)
-		hardwall_deactivate(info->task);
+	hardwall_deactivate_all(info->task);
 #endif
 
 	if (step_state) {
@@ -245,7 +245,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 #ifdef CONFIG_HARDWALL
 	/* New thread does not own any networks. */
-	p->thread.hardwall = NULL;
+	memset(&p->thread.hardwall[0], 0,
+	       sizeof(struct hardwall_task) * HARDWALL_TYPES);
 #endif
 
 
@@ -515,12 +516,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 
 #ifdef CONFIG_HARDWALL
 	/* Enable or disable access to the network registers appropriately. */
-	if (prev->thread.hardwall != NULL) {
-		if (next->thread.hardwall == NULL)
-			restrict_network_mpls();
-	} else if (next->thread.hardwall != NULL) {
-		grant_network_mpls();
-	}
+	hardwall_switch_tasks(prev, next);
 #endif
 
 	/*
arch/tile/kernel/relocate_kernel_64.S (new file, 260 lines)
@@ -0,0 +1,260 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * copy new kernel into place and then call hv_reexec
 *
 */

#include <linux/linkage.h>
#include <arch/chip.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

#undef RELOCATE_NEW_KERNEL_VERBOSE

STD_ENTRY(relocate_new_kernel)

	move	r30, r0		/* page list */
	move	r31, r1		/* address of page we are on */
	move	r32, r2		/* start address of new kernel */

	shrui	r1, r1, PAGE_SHIFT
	addi	r1, r1, 1
	shli	sp, r1, PAGE_SHIFT
	addi	sp, sp, -8
	/* we now have a stack (whether we need one or not) */

	moveli	r40, hw2_last(hv_console_putc)
	shl16insli r40, r40, hw1(hv_console_putc)
	shl16insli r40, r40, hw0(hv_console_putc)

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'r'
	jalr	r40

	moveli	r0, '_'
	jalr	r40

	moveli	r0, 'n'
	jalr	r40

	moveli	r0, '_'
	jalr	r40

	moveli	r0, 'k'
	jalr	r40

	moveli	r0, '\n'
	jalr	r40
#endif

	/*
	 * Throughout this code r30 is pointer to the element of page
	 * list we are working on.
	 *
	 * Normally we get to the next element of the page list by
	 * incrementing r30 by eight.  The exception is if the element
	 * on the page list is an IND_INDIRECTION in which case we use
	 * the element with the low bits masked off as the new value
	 * of r30.
	 *
	 * To get this started, we need the value passed to us (which
	 * will always be an IND_INDIRECTION) in memory somewhere with
	 * r30 pointing at it.  To do that, we push the value passed
	 * to us on the stack and make r30 point to it.
	 */

	st	sp, r30
	move	r30, sp
	addi	sp, sp, -16

#if CHIP_HAS_CBOX_HOME_MAP()
	/*
	 * On TILE-GX, we need to flush all tiles' caches, since we may
	 * have been doing hash-for-home caching there.  Note that we
	 * must do this _after_ we're completely done modifying any memory
	 * other than our output buffer (which we know is locally cached).
	 * We want the caches to be fully clean when we do the reexec,
	 * because the hypervisor is going to do this flush again at that
	 * point, and we don't want that second flush to overwrite any memory.
	 */
	{
	 move	r0, zero	/* cache_pa */
	 moveli	r1, hw2_last(HV_FLUSH_EVICT_L2)
	}
	{
	 shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
	 movei	r2, -1		/* cache_cpumask; -1 means all client tiles */
	}
	{
	 shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2)	/* cache_control */
	 move	r3, zero	/* tlb_va */
	}
	{
	 move	r4, zero	/* tlb_length */
	 move	r5, zero	/* tlb_pgsize */
	}
	{
	 move	r6, zero	/* tlb_cpumask */
	 move	r7, zero	/* asids */
	}
	{
	 moveli	r20, hw2_last(hv_flush_remote)
	 move	r8, zero	/* asidcount */
	}
	shl16insli r20, r20, hw1(hv_flush_remote)
	shl16insli r20, r20, hw0(hv_flush_remote)

	jalr	r20
#endif

	/* r33 is destination pointer, default to zero */

	moveli	r33, 0

.Lloop:	ld	r10, r30

	andi	r9, r10, 0xf	/* low 4 bits tell us what type it is */
	xor	r10, r10, r9	/* r10 is now value with low 4 bits stripped */

	cmpeqi	r0, r9, 0x1	/* IND_DESTINATION */
	beqzt	r0, .Ltry2

	move	r33, r10

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'd'
	jalr	r40
#endif

	addi	r30, r30, 8
	j	.Lloop

.Ltry2:
	cmpeqi	r0, r9, 0x2	/* IND_INDIRECTION */
	beqzt	r0, .Ltry4

	move	r30, r10

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'i'
	jalr	r40
#endif

	j	.Lloop

.Ltry4:
	cmpeqi	r0, r9, 0x4	/* IND_DONE */
	beqzt	r0, .Ltry8

	mf

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'D'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40
#endif

	move	r0, r32

	moveli	r41, hw2_last(hv_reexec)
	shl16insli r41, r41, hw1(hv_reexec)
	shl16insli r41, r41, hw0(hv_reexec)

	jalr	r41

	/* we should not get here */

	moveli	r0, '?'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40

	j	.Lhalt

.Ltry8:	cmpeqi	r0, r9, 0x8	/* IND_SOURCE */
	beqz	r0, .Lerr	/* unknown type */

	/* copy page at r10 to page at r33 */

	move	r11, r33

	moveli	r0, hw2_last(PAGE_SIZE)
	shl16insli r0, r0, hw1(PAGE_SIZE)
	shl16insli r0, r0, hw0(PAGE_SIZE)
	add	r33, r33, r0

	/* copy word at r10 to word at r11 until r11 equals r33 */

	/* We know page size must be multiple of 8, so we can unroll
	 * 8 times safely without any edge case checking.
	 *
	 * Issue a flush of the destination every 8 words to avoid
	 * incoherence when starting the new kernel.  (Now this is
	 * just good paranoia because the hv_reexec call will also
	 * take care of this.)
	 */

1:
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0; addi	r11, r11, 8 }
	{ ld	r0, r10; addi	r10, r10, 8 }
	{ st	r11, r0 }
	{ flush r11    ; addi	r11, r11, 8 }

	cmpeq	r0, r33, r11
	beqzt	r0, 1b

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 's'
	jalr	r40
#endif

	addi	r30, r30, 8
	j	.Lloop


.Lerr:	moveli	r0, 'e'
	jalr	r40
	moveli	r0, 'r'
	jalr	r40
	moveli	r0, 'r'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40
.Lhalt:
	moveli	r41, hw2_last(hv_halt)
	shl16insli r41, r41, hw1(hv_halt)
	shl16insli r41, r41, hw0(hv_halt)

	jalr	r41
	STD_ENDPROC(relocate_new_kernel)

	.section .rodata,"a"

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long .Lend_relocate_new_kernel - relocate_new_kernel
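The assembly above walks the kexec "page list": an array of 64-bit entries whose low four bits tag each entry, exactly as the big comment in the file describes. A C sketch of the same walk, under the assumption (as in the assembly, which runs with a quasi VA==PA mapping) that the addresses are directly usable; tag values mirror the assembly, helper names are illustrative:

    #define IND_DESTINATION 0x1   /* set the destination pointer */
    #define IND_INDIRECTION 0x2   /* continue walking at another list page */
    #define IND_DONE        0x4   /* list finished; asm calls hv_reexec here */
    #define IND_SOURCE      0x8   /* copy one source page to the destination */

    #define PAGE_SIZE_SKETCH 65536UL   /* illustrative; tilegx uses 16K or 64K */

    static void copy_page_sketch(unsigned long dst, unsigned long src)
    {
            __builtin_memcpy((void *)dst, (void *)src, PAGE_SIZE_SKETCH);
    }

    static void walk_page_list_sketch(unsigned long *entry)
    {
            unsigned long dest = 0;

            for (;;) {
                    unsigned long val = *entry;
                    unsigned long addr = val & ~0xfUL;   /* strip the tag bits */

                    switch (val & 0xf) {
                    case IND_DESTINATION:
                            dest = addr;
                            entry++;
                            break;
                    case IND_INDIRECTION:
                            entry = (unsigned long *)addr;  /* new list page */
                            break;
                    case IND_SOURCE:
                            copy_page_sketch(dest, addr);
                            dest += PAGE_SIZE_SKETCH;
                            entry++;
                            break;
                    case IND_DONE:
                            return;  /* the asm jumps into the new kernel */
                    default:
                            return;  /* the asm prints "err" and halts */
                    }
            }
    }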
arch/tile/kernel/setup.c
@@ -28,6 +28,7 @@
 #include <linux/highmem.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
+#include <linux/hugetlb.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,9 +50,6 @@ char chip_model[64] __write_once;
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
-/* We only create bootmem data on node 0. */
-static bootmem_data_t __initdata node0_bdata;
-
 /* Information on the NUMA nodes that we compute early */
 unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
 unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +532,96 @@ static void __init setup_memory(void)
 #endif
 }
 
-static void __init setup_bootmem_allocator(void)
+/*
+ * On 32-bit machines, we only put bootmem on the low controller,
+ * since PAs > 4GB can't be used in bootmem.  In principle one could
+ * imagine, e.g., multiple 1 GB controllers all of which could support
+ * bootmem, but in practice using controllers this small isn't a
+ * particularly interesting scenario, so we just keep it simple and
+ * use only the first controller for bootmem on 32-bit machines.
+ */
+static inline int node_has_bootmem(int nid)
 {
-	unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;
-
-	/* Provide a node 0 bdata. */
-	NODE_DATA(0)->bdata = &node0_bdata;
-
-#ifdef CONFIG_PCI
-	/* Don't let boot memory alias the PCI region. */
-	last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
+#ifdef CONFIG_64BIT
+	return 1;
 #else
-	last_alloc_pfn = max_low_pfn;
+	return nid == 0;
 #endif
+}
 
+static inline unsigned long alloc_bootmem_pfn(int nid,
+					      unsigned long size,
+					      unsigned long goal)
+{
+	void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
+					 PAGE_SIZE, goal);
+	unsigned long pfn = kaddr_to_pfn(kva);
+	BUG_ON(goal && PFN_PHYS(pfn) != goal);
+	return pfn;
+}
+
+static void __init setup_bootmem_allocator_node(int i)
+{
+	unsigned long start, end, mapsize, mapstart;
+
+	if (node_has_bootmem(i)) {
+		NODE_DATA(i)->bdata = &bootmem_node_data[i];
+	} else {
+		/* Share controller zero's bdata for now. */
+		NODE_DATA(i)->bdata = &bootmem_node_data[0];
+		return;
+	}
+
+	/* Skip up to after the bss in node 0. */
+	start = (i == 0) ? min_low_pfn : node_start_pfn[i];
+
+	/* Only lowmem, if we're a HIGHMEM build. */
+#ifdef CONFIG_HIGHMEM
+	end = node_lowmem_end_pfn[i];
+#else
+	end = node_end_pfn[i];
+#endif
+
-	/*
-	 * Initialize the boot-time allocator (with low memory only):
-	 * The first argument says where to put the bitmap, and the
-	 * second says where the end of allocatable memory is.
-	 */
-	bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);
+	/* No memory here. */
+	if (end == start)
+		return;
 
-	/*
-	 * Let the bootmem allocator use all the space we've given it
-	 * except for its own bitmap.
-	 */
-	first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
-	if (first_alloc_pfn >= last_alloc_pfn)
-		early_panic("Not enough memory on controller 0 for bootmem\n");
+	/* Figure out where the bootmem bitmap is located. */
+	mapsize = bootmem_bootmap_pages(end - start);
+	if (i == 0) {
+		/* Use some space right before the heap on node 0. */
+		mapstart = start;
+		start += mapsize;
+	} else {
+		/* Allocate bitmap on node 0 to avoid page table issues. */
+		mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
+	}
 
-	free_bootmem(PFN_PHYS(first_alloc_pfn),
-		     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));
+	/* Initialize a node. */
+	init_bootmem_node(NODE_DATA(i), mapstart, start, end);
 
+	/* Free all the space back into the allocator. */
+	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
+
 #if defined(CONFIG_PCI)
 	/*
 	 * Throw away any memory aliased by the PCI region.  FIXME: this
 	 * is a temporary hack to work around bug 10502, and needs to be
 	 * fixed properly.
	 */
 	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
 		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
 				PFN_PHYS(pci_reserve_end_pfn -
 					 pci_reserve_start_pfn),
 				BOOTMEM_EXCLUSIVE);
 #endif
+}
 
+static void __init setup_bootmem_allocator(void)
+{
+	int i;
+	for (i = 0; i < MAX_NUMNODES; ++i)
+		setup_bootmem_allocator_node(i);
 
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
@@ -595,14 +652,6 @@ static int __init percpu_size(void)
 	return size;
 }
 
-static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
-{
-	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
-	unsigned long pfn = kaddr_to_pfn(kva);
-	BUG_ON(goal && PFN_PHYS(pfn) != goal);
-	return pfn;
-}
-
 static void __init zone_sizes_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +689,22 @@ static void __init zone_sizes_init(void)
 		 * though, there'll be no lowmem, so we just alloc_bootmem
 		 * the memmap.  There will be no percpu memory either.
 		 */
-		if (__pfn_to_highbits(start) == 0) {
-			/* In low PAs, allocate via bootmem. */
+		if (i != 0 && cpu_isset(i, isolnodes)) {
+			node_memmap_pfn[i] =
+				alloc_bootmem_pfn(0, memmap_size, 0);
+			BUG_ON(node_percpu[i] != 0);
+		} else if (node_has_bootmem(start)) {
 			unsigned long goal = 0;
 			node_memmap_pfn[i] =
-				alloc_bootmem_pfn(memmap_size, goal);
+				alloc_bootmem_pfn(i, memmap_size, 0);
 			if (kdata_huge)
 				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
 			if (node_percpu[i])
 				node_percpu_pfn[i] =
-					alloc_bootmem_pfn(node_percpu[i], goal);
-		} else if (cpu_isset(i, isolnodes)) {
-			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
-			BUG_ON(node_percpu[i] != 0);
+					alloc_bootmem_pfn(i, node_percpu[i],
+							  goal);
 		} else {
-			/* In high PAs, just reserve some pages. */
+			/* In non-bootmem zones, just reserve some pages. */
 			node_memmap_pfn[i] = node_free_pfn[i];
 			node_free_pfn[i] += PFN_UP(memmap_size);
 			if (!kdata_huge) {
@@ -678,16 +728,9 @@ static void __init zone_sizes_init(void)
 		zones_size[ZONE_NORMAL] = end - start;
 #endif
 
-		/*
-		 * Everyone shares node 0's bootmem allocator, but
-		 * we use alloc_remap(), above, to put the actual
-		 * struct page array on the individual controllers,
-		 * which is most of the data that we actually care about.
-		 * We can't place bootmem allocators on the other
-		 * controllers, since the bootmem allocator can only
-		 * operate on 32-bit physical addresses.
-		 */
-		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
+		/* Take zone metadata from controller 0 if we're isolnode. */
+		if (node_isset(i, isolnodes))
+			NODE_DATA(i)->bdata = &bootmem_node_data[0];
 
 		free_area_init_node(i, zones_size, start, NULL);
 		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
@@ -870,6 +913,22 @@ subsys_initcall(topology_init);
 
 #endif /* CONFIG_NUMA */
 
+/*
+ * Initialize hugepage support on this cpu.  We do this on all cores
+ * early in boot: before argument parsing for the boot cpu, and after
+ * argument parsing but before the init functions run on the secondaries.
+ * So the values we set up here in the hypervisor may be overridden on
+ * the boot cpu as arguments are parsed.
+ */
+static __cpuinit void init_super_pages(void)
+{
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+	int i;
+	for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
+		hv_set_pte_super_shift(i, huge_shift[i]);
+#endif
+}
+
 /**
  * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
  * @boot: Is this the boot cpu?
@@ -924,6 +983,8 @@ void __cpuinit setup_cpu(int boot)
 	/* Reset the network state on this cpu. */
 	reset_network_state();
 #endif
+
+	init_super_pages();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1473,13 @@ void __init setup_per_cpu_areas(void)
 	for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
 
 		/* Update the vmalloc mapping and page home. */
-		pte_t *ptep =
-			virt_to_pte(NULL, (unsigned long)ptr + i);
+		unsigned long addr = (unsigned long)ptr + i;
+		pte_t *ptep = virt_to_pte(NULL, addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
 		pte = set_remote_cache_cpu(pte, cpu);
-		set_pte(ptep, pte);
+		set_pte_at(&init_mm, addr, ptep, pte);
 
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1492,7 @@ void __init setup_per_cpu_areas(void)
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
-		set_pte(ptep, pte);
+		set_pte_at(&init_mm, lowmem_va, ptep, pte);
 	}
 }
 
arch/tile/kernel/single_step.c
@@ -172,9 +172,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 		return (tilepro_bundle_bits) 0;
 	}
 
-#ifndef __LITTLE_ENDIAN
-# error We assume little-endian representation with copy_xx_user size 2 here
-#endif
 	/* Handle unaligned load/store */
 	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
 		unsigned short val_16;
@@ -195,8 +192,19 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 			state->update = 1;
 		}
 	} else {
+		unsigned short val_16;
 		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
-		err = copy_to_user(addr, &val, size);
+		switch (size) {
+		case 2:
+			val_16 = val;
+			err = copy_to_user(addr, &val_16, sizeof(val_16));
+			break;
+		case 4:
+			err = copy_to_user(addr, &val, sizeof(val));
+			break;
+		default:
+			BUG();
+		}
 	}
 
 	if (err) {
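The switch above replaces a raw copy_to_user(addr, &val, size), and the removed #error in the previous hunk explains why: copying the first `size` bytes of a wider integer only yields its low half-word on a little-endian kernel. On big-endian, a properly typed temporary is required. A self-contained illustration of the same pitfall in plain C:

    #include <stdint.h>
    #include <string.h>

    static void store_low_halfword(void *addr, uint64_t val)
    {
            uint16_t val_16 = (uint16_t)val;        /* truncate by value */
            memcpy(addr, &val_16, sizeof(val_16));  /* endian-safe */
            /* By contrast, memcpy(addr, &val, 2) would copy the HIGH-order
             * bytes of val on a big-endian machine. */
    }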
arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
 		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
-		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		offset = PFN_PHYS(pte_pfn(pte));
 		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
 	}
 #endif
arch/tile/kernel/sys.c
@@ -32,11 +32,17 @@
 #include <asm/syscalls.h>
 #include <asm/pgtable.h>
 #include <asm/homecache.h>
+#include <asm/cachectl.h>
 #include <arch/chip.h>
 
-SYSCALL_DEFINE0(flush_cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
+		unsigned long, flags)
 {
-	homecache_evict(cpumask_of(smp_processor_id()));
+	if (flags & DCACHE)
+		homecache_evict(cpumask_of(smp_processor_id()));
+	if (flags & ICACHE)
+		flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
+			     0, 0, 0, NULL, NULL, 0);
 	return 0;
 }
 
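For context, the new syscall gives userspace (e.g. a JIT that has just written code into a buffer) a way to request dcache eviction and/or icache invalidation. A hedged userspace sketch; the ICACHE/DCACHE flags come from the tile <asm/cachectl.h> added by this series, but the syscall number and flag values below are placeholders, not authoritative:

    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef SYS_cacheflush
    #define SYS_cacheflush 1000       /* placeholder: the real number is per-arch */
    #endif
    #define ICACHE_SKETCH (1 << 0)    /* placeholder flag bits */
    #define DCACHE_SKETCH (1 << 1)

    static int cacheflush_sketch(void *addr, unsigned long len)
    {
            /* Flush written data and invalidate stale instruction lines
             * before executing freshly generated code at addr. */
            return syscall(SYS_cacheflush, addr, len,
                           ICACHE_SKETCH | DCACHE_SKETCH);
    }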
arch/tile/kernel/sysfs.c
@@ -93,6 +93,10 @@ HV_CONF_ATTR(mezz_part,	HV_CONFSTR_MEZZ_PART_NUM)
 HV_CONF_ATTR(mezz_serial,	HV_CONFSTR_MEZZ_SERIAL_NUM)
 HV_CONF_ATTR(mezz_revision,	HV_CONFSTR_MEZZ_REV)
 HV_CONF_ATTR(mezz_description,	HV_CONFSTR_MEZZ_DESC)
+HV_CONF_ATTR(cpumod_part,	HV_CONFSTR_CPUMOD_PART_NUM)
+HV_CONF_ATTR(cpumod_serial,	HV_CONFSTR_CPUMOD_SERIAL_NUM)
+HV_CONF_ATTR(cpumod_revision,	HV_CONFSTR_CPUMOD_REV)
+HV_CONF_ATTR(cpumod_description,HV_CONFSTR_CPUMOD_DESC)
 HV_CONF_ATTR(switch_control,	HV_CONFSTR_SWITCH_CONTROL)
 
 static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@ static struct attribute *board_attrs[] = {
 	&dev_attr_mezz_serial.attr,
 	&dev_attr_mezz_revision.attr,
 	&dev_attr_mezz_description.attr,
+	&dev_attr_cpumod_part.attr,
+	&dev_attr_cpumod_serial.attr,
+	&dev_attr_cpumod_revision.attr,
+	&dev_attr_cpumod_description.attr,
 	&dev_attr_switch_control.attr,
 	NULL
 };
arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 #include <hv/hypervisor.h>
@@ -49,25 +50,25 @@ void flush_tlb_current_task(void)
 	flush_tlb_mm(current->mm);
 }
 
-void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
+void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
 		       unsigned long va)
 {
-	unsigned long size = hv_page_size(vma);
+	unsigned long size = vma_kernel_pagesize(vma);
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
 	flush_remote(0, cache, mm_cpumask(mm),
 		     va, size, size, mm_cpumask(mm), NULL, 0);
 }
 
-void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	flush_tlb_page_mm(vma, vma->vm_mm, va);
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
-void flush_tlb_range(const struct vm_area_struct *vma,
+void flush_tlb_range(struct vm_area_struct *vma,
 		     unsigned long start, unsigned long end)
 {
-	unsigned long size = hv_page_size(vma);
+	unsigned long size = vma_kernel_pagesize(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
 	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
arch/tile/kernel/traps.c
@@ -195,6 +195,25 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
 	return 1;
 }
 
+static const char *const int_name[] = {
+	[INT_MEM_ERROR] = "Memory error",
+	[INT_ILL] = "Illegal instruction",
+	[INT_GPV] = "General protection violation",
+	[INT_UDN_ACCESS] = "UDN access",
+	[INT_IDN_ACCESS] = "IDN access",
+#if CHIP_HAS_SN()
+	[INT_SN_ACCESS] = "SN access",
+#endif
+	[INT_SWINT_3] = "Software interrupt 3",
+	[INT_SWINT_2] = "Software interrupt 2",
+	[INT_SWINT_0] = "Software interrupt 0",
+	[INT_UNALIGN_DATA] = "Unaligned data",
+	[INT_DOUBLE_FAULT] = "Double fault",
+#ifdef __tilegx__
+	[INT_ILL_TRANS] = "Illegal virtual address",
+#endif
+};
+
 void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		       unsigned long reason)
 {
@@ -211,10 +230,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	 * current process and hope for the best.
 	 */
 	if (!user_mode(regs)) {
+		const char *name;
 		if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
 			return;
-		pr_alert("Kernel took bad trap %d at PC %#lx\n",
-			 fault_num, regs->pc);
+		if (fault_num >= 0 &&
+		    fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+		    int_name[fault_num] != NULL)
+			name = int_name[fault_num];
+		else
+			name = "Unknown interrupt";
+		pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
+			 fault_num, name, regs->pc);
 		if (fault_num == INT_GPV)
 			pr_alert("GPV_REASON is %#lx\n", reason);
 		show_regs(regs);