csky: Cache and TLB routines

This patch adds the cache maintenance and TLB flush routines for abiv1 & abiv2.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Author: Guo Ren
Date: 2018-09-05 14:25:10 +08:00
Parent: 4859bfca11
Commit: 00a9730e10
15 files changed, 838 insertions(+), 0 deletions(-)

arch/csky/mm/cachev1.c (new file, 126 lines)

@@ -0,0 +1,126 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/spinlock.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>
/* for L1-cache */
#define INS_CACHE (1 << 0)
#define DATA_CACHE (1 << 1)
#define CACHE_INV (1 << 4)
#define CACHE_CLR (1 << 5)
#define CACHE_OMS (1 << 6)
#define CACHE_ITS (1 << 7)
#define CACHE_LICF (1 << 31)
/* for L2-cache */
#define CR22_LEVEL_SHIFT (1)
#define CR22_SET_SHIFT (7)
#define CR22_WAY_SHIFT (30)
#define CR22_WAY_SHIFT_L2 (29)
static DEFINE_SPINLOCK(cache_lock);
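/*
 * Select the target line by writing its address to cr22, then launch
 * the operation by writing the flag word to cr17.
 */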
static inline void cache_op_line(unsigned long i, unsigned int val)
{
mtcr("cr22", i);
mtcr("cr17", val);
}
#define CCR2_L2E (1 << 3)
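/*
 * Operate on the whole L1 through cr17; when the caller requests L2
 * maintenance and CCR2 reports the L2 enabled, repeat the operation
 * through cr24.
 */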
static void cache_op_all(unsigned int value, unsigned int l2)
{
mtcr("cr17", value | CACHE_CLR);
mb();
if (l2 && (mfcr_ccr2() & CCR2_L2E)) {
mtcr("cr24", value | CACHE_CLR);
mb();
}
}
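/*
 * Line-by-line maintenance over [start, end). Ranges of a page or
 * more, or addresses outside the lowmem direct mapping, fall back to
 * a whole-cache operation instead of walking every line.
 */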
static void cache_op_range(
unsigned int start,
unsigned int end,
unsigned int value,
unsigned int l2)
{
unsigned long i, flags;
unsigned int val = value | CACHE_CLR | CACHE_OMS;
bool l2_sync;
if (unlikely((end - start) >= PAGE_SIZE) ||
unlikely(start < PAGE_OFFSET) ||
unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) {
cache_op_all(value, l2);
return;
}
l2_sync = (mfcr_ccr2() & CCR2_L2E) && l2;
spin_lock_irqsave(&cache_lock, flags);
i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES) {
cache_op_line(i, val);
if (l2_sync) {
mb();
mtcr("cr24", val);
}
}
spin_unlock_irqrestore(&cache_lock, flags);
mb();
}
void dcache_wb_line(unsigned long start)
{
asm volatile("idly4\n":::"memory");
cache_op_line(start, DATA_CACHE|CACHE_CLR);
mb();
}
void icache_inv_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, INS_CACHE|CACHE_INV, 0);
}
void icache_inv_all(void)
{
cache_op_all(INS_CACHE|CACHE_INV, 0);
}
void dcache_wb_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
}
void dcache_wbinv_all(void)
{
cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}
void cache_wbinv_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}
EXPORT_SYMBOL(cache_wbinv_range);
void cache_wbinv_all(void)
{
cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}
void dma_wbinv_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}
void dma_wb_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1);
}
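These dma_* routines are the primitives a noncoherent DMA path needs around transfers. A minimal sketch of the usual pattern, with hypothetical wrapper names:

/* Hypothetical sketch of noncoherent DMA buffer maintenance. */
static void cpu_to_device(unsigned long buf, unsigned long len)
{
	/* write dirty lines back so the device reads current data */
	dma_wb_range(buf, buf + len);
}

static void device_to_cpu(unsigned long buf, unsigned long len)
{
	/* drop stale lines so the CPU re-reads what the device wrote */
	dma_wbinv_range(buf, buf + len);
}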

arch/csky/mm/cachev2.c (new file, 79 lines)

@@ -0,0 +1,79 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/cache.h>
#include <asm/barrier.h>
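/*
 * abiv2 cores have dedicated cache maintenance instructions, so no
 * control-register sequence is needed. Per their use below:
 * dcache.cval1 writes a line back by virtual address, dcache.civa
 * writes back and invalidates, icache.iva invalidates an icache
 * line, and sync_is() orders the operations.
 */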
inline void dcache_wb_line(unsigned long start)
{
asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
sync_is();
}
void icache_inv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("icache.iva %0\n"::"r"(i):"memory");
sync_is();
}
void icache_inv_all(void)
{
asm volatile("icache.ialls\n":::"memory");
sync_is();
}
void dcache_wb_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
sync_is();
}
void dcache_inv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.civa %0\n"::"r"(i):"memory");
sync_is();
}
void cache_wbinv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
sync_is();
i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("icache.iva %0\n"::"r"(i):"memory");
sync_is();
}
EXPORT_SYMBOL(cache_wbinv_range);
void dma_wbinv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.civa %0\n"::"r"(i):"memory");
sync_is();
}
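/*
 * Note: dma_wb_range below uses the same write-back-and-invalidate
 * (dcache.civa) as dma_wbinv_range above, a safe over-approximation
 * of a pure write-back.
 */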
void dma_wb_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.civa %0\n"::"r"(i):"memory");
sync_is();
}

arch/csky/mm/syscache.c (new file, 32 lines)

@@ -0,0 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/syscalls.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cachectl.h>
SYSCALL_DEFINE3(cacheflush,
void __user *, addr,
unsigned long, bytes,
int, cache)
{
switch (cache) {
case ICACHE:
icache_inv_range((unsigned long)addr,
(unsigned long)addr + bytes);
break;
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
break;
case BCACHE:
cache_wbinv_range((unsigned long)addr,
(unsigned long)addr + bytes);
break;
default:
return -EINVAL;
}
return 0;
}
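For context, a userspace caller would reach this handler through the raw syscall interface. A minimal hypothetical sketch, assuming the csky uapi headers export __NR_cacheflush and the ICACHE/DCACHE/BCACHE constants used above:

/*
 * Hypothetical userspace sketch: make freshly written machine code
 * safe to execute.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>

static int flush_jit_buffer(void *buf, unsigned long len)
{
	/* BCACHE: write dcache lines back, then invalidate the icache */
	return syscall(__NR_cacheflush, buf, len, BCACHE);
}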

arch/csky/mm/tlb.c (new file, 219 lines)

@@ -0,0 +1,219 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE
void flush_tlb_all(void)
{
tlb_invalid_all();
}
void flush_tlb_mm(struct mm_struct *mm)
{
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0)
drop_mmu_context(mm, cpu);
tlb_invalid_all();
}
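/*
 * Restore the saved ASID into entryhi. If the saved and current
 * ASIDs are equal, write a different value first so that, per the
 * macro's name, the entryhi change also invalidates the micro-TLB
 * (uTLB).
 */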
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
if ((oldpid & ASID_MASK) == newpid) \
write_mmu_entryhi(oldpid + 1); \
write_mmu_entryhi(oldpid); \
} while (0)
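/*
 * Each jTLB entry maps an even/odd pair of pages (see entrylo0 and
 * entrylo1 in show_jtlb_table below), so ranges are aligned to a
 * two-page granule and stepped by PAGE_SIZE << 1.
 */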
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
unsigned long size, flags;
int newpid = cpu_asid(cpu, mm);
local_irq_save(flags);
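/* each jTLB entry covers a page pair, so halve the page count */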
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if (size <= CSKY_TLB_SIZE/2) {
start &= (PAGE_MASK << 1);
end += ((PAGE_SIZE << 1) - 1);
end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
while (start < end) {
asm volatile("tlbi.vaas %0"
::"r"(start | newpid));
start += (PAGE_SIZE << 1);
}
sync_is();
#else
{
int oldpid = read_mmu_entryhi();
while (start < end) {
int idx;
write_mmu_entryhi(start | newpid);
start += (PAGE_SIZE << 1);
tlb_probe();
idx = read_mmu_index();
if (idx >= 0)
tlb_invalid_indexed();
}
restore_asid_inv_utlb(oldpid, newpid);
}
#endif
} else {
drop_mmu_context(mm, cpu);
}
local_irq_restore(flags);
}
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long size, flags;
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= CSKY_TLB_SIZE) {
start &= (PAGE_MASK << 1);
end += ((PAGE_SIZE << 1) - 1);
end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
while (start < end) {
asm volatile("tlbi.vaas %0"::"r"(start));
start += (PAGE_SIZE << 1);
}
sync_is();
#else
{
int oldpid = read_mmu_entryhi();
while (start < end) {
int idx;
write_mmu_entryhi(start);
start += (PAGE_SIZE << 1);
tlb_probe();
idx = read_mmu_index();
if (idx >= 0)
tlb_invalid_indexed();
}
restore_asid_inv_utlb(oldpid, 0);
}
#endif
} else {
flush_tlb_all();
}
local_irq_restore(flags);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
int cpu = smp_processor_id();
int newpid;
if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
/* don't deref vma->vm_mm before the NULL check; fall back to ASID 0 */
newpid = vma ? cpu_asid(cpu, vma->vm_mm) : 0;
page &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
asm volatile("tlbi.vaas %0"::"r"(page | newpid));
sync_is();
#else
{
int oldpid, idx;
unsigned long flags;
local_irq_save(flags);
oldpid = read_mmu_entryhi();
write_mmu_entryhi(page | newpid);
tlb_probe();
idx = read_mmu_index();
if (idx >= 0)
tlb_invalid_indexed();
restore_asid_inv_utlb(oldpid, newpid);
local_irq_restore(flags);
}
#endif
}
}
/*
* Remove one kernel space TLB entry. This entry is assumed to be marked
* global so we don't do the ASID thing.
*/
void flush_tlb_one(unsigned long page)
{
int oldpid;
oldpid = read_mmu_entryhi();
page &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
page = page | (oldpid & 0xfff);
asm volatile("tlbi.vaas %0"::"r"(page));
sync_is();
#else
{
int idx;
unsigned long flags;
page = page | (oldpid & 0xff);
local_irq_save(flags);
write_mmu_entryhi(page);
tlb_probe();
idx = read_mmu_index();
if (idx >= 0)
tlb_invalid_indexed();
restore_asid_inv_utlb(oldpid, oldpid);
local_irq_restore(flags);
}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);
/* show current 32 jtlbs */
void show_jtlb_table(void)
{
unsigned long flags;
int entryhi, entrylo0, entrylo1;
int entry;
int oldpid;
local_irq_save(flags);
entry = 0;
pr_info("\n\n\n");
oldpid = read_mmu_entryhi();
while (entry < CSKY_TLB_SIZE) {
write_mmu_index(entry);
tlb_read();
entryhi = read_mmu_entryhi();
entrylo0 = read_mmu_entrylo0();
entrylo1 = read_mmu_entrylo1();
pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x; entrylo1 - 0x%x\n",
entry, entryhi, entrylo0, entrylo1);
entry++;
}
write_mmu_entryhi(oldpid);
local_irq_restore(flags);
}