csky: Cache and TLB routines
This patch adds the cache and TLB sync code for abiv1 & abiv2.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>

arch/csky/include/asm/barrier.h (new file, 49 lines)
@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BARRIER_H
#define __ASM_CSKY_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()	asm volatile ("nop\n":::"memory")

/*
 * sync:        completion barrier
 * sync.s:      completion barrier and shareable to other cores
 * sync.i:      completion barrier with flush cpu pipeline
 * sync.is:     completion barrier with flush cpu pipeline and shareable to
 *              other cores
 *
 * bar.brwarw:  ordering barrier for all load/store instructions before it
 * bar.brwarws: ordering barrier for all load/store instructions before it
 *              and shareable to other cores
 * bar.brar:    ordering barrier for all load instructions before it
 * bar.brars:   ordering barrier for all load instructions before it
 *              and shareable to other cores
 * bar.bwaw:    ordering barrier for all store instructions before it
 * bar.bwaws:   ordering barrier for all store instructions before it
 *              and shareable to other cores
 */
#ifdef CONFIG_CPU_HAS_CACHEV2
#define mb()		asm volatile ("bar.brwarw\n":::"memory")
#define rmb()		asm volatile ("bar.brar\n":::"memory")
#define wmb()		asm volatile ("bar.bwaw\n":::"memory")

#ifdef CONFIG_SMP
#define __smp_mb()	asm volatile ("bar.brwarws\n":::"memory")
#define __smp_rmb()	asm volatile ("bar.brars\n":::"memory")
#define __smp_wmb()	asm volatile ("bar.bwaws\n":::"memory")
#endif /* CONFIG_SMP */

#define sync_is()	asm volatile ("sync.is\n":::"memory")

#else /* !CONFIG_CPU_HAS_CACHEV2 */
#define mb()		asm volatile ("sync\n":::"memory")
#endif

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_BARRIER_H */
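
To illustrate the instructions described in the comment block above, here is a minimal producer/consumer sketch (not part of the patch; data and flag are hypothetical shared variables). On a CACHEV2 SMP kernel the smp_*() macros map to the bar.* forms defined above; on older cores they fall back through asm-generic/barrier.h to the plain sync-based mb().

    #include <asm/barrier.h>

    /* Hypothetical shared state used only for this illustration. */
    static int data;
    static int flag;

    static void producer(void)
    {
    	data = 42;
    	smp_wmb();	/* order the data store before the flag store */
    	flag = 1;
    }

    static int consumer(void)
    {
    	if (!flag)
    		return -1;
    	smp_rmb();	/* order the flag load before the data load */
    	return data;
    }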

arch/csky/include/asm/cache.h (new file, 30 lines)
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CACHE_H
#define __ASM_CSKY_CACHE_H

/* bytes per L1 cache line */
#define L1_CACHE_SHIFT	CONFIG_L1_CACHE_SHIFT

#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

#ifndef __ASSEMBLY__

void dcache_wb_line(unsigned long start);

void icache_inv_range(unsigned long start, unsigned long end);
void icache_inv_all(void);

void dcache_wb_range(unsigned long start, unsigned long end);
void dcache_wbinv_all(void);

void cache_wbinv_range(unsigned long start, unsigned long end);
void cache_wbinv_all(void);

void dma_wbinv_range(unsigned long start, unsigned long end);
void dma_wb_range(unsigned long start, unsigned long end);

#endif
#endif /* __ASM_CSKY_CACHE_H */
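
As an illustration only (not part of the patch), a hypothetical snippet showing how the dma_* helpers declared above could bracket a device DMA transfer; the function names, buf and len are made up for this sketch:

    #include <linux/types.h>
    #include <asm/cache.h>

    /* Hypothetical helpers; buf/len describe a kernel virtual range. */
    static void example_dma_to_device(void *buf, size_t len)
    {
    	unsigned long start = (unsigned long)buf;

    	/* Write dirty lines back so the device reads current data. */
    	dma_wb_range(start, start + len);
    }

    static void example_dma_from_device(void *buf, size_t len)
    {
    	unsigned long start = (unsigned long)buf;

    	/* Write back and invalidate so the CPU re-reads what the device wrote. */
    	dma_wbinv_range(start, start + len);
    }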

arch/csky/include/asm/cacheflush.h (new file, 9 lines)
@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H

#include <abi/cacheflush.h>

#endif /* __ASM_CSKY_CACHEFLUSH_H */

arch/csky/include/asm/io.h (new file, 24 lines)
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H

#include <abi/pgtable-bits.h>
#include <linux/types.h>
#include <linux/version.h>

extern void __iomem *ioremap(phys_addr_t offset, size_t size);

extern void iounmap(void *addr);

extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
		size_t size, unsigned long flags);

#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache

#include <asm-generic/io.h>

#endif /* __ASM_CSKY_IO_H */
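
For illustration (not part of the patch), a hypothetical probe-style function using the ioremap() interface declared above; EXAMPLE_PHYS_BASE, EXAMPLE_REG_SIZE and EXAMPLE_ID_REG are made-up values, and readl() comes from the asm-generic/io.h included at the end of the header:

    #include <linux/errno.h>
    #include <linux/io.h>

    #define EXAMPLE_PHYS_BASE	0xffff0000UL	/* hypothetical MMIO base */
    #define EXAMPLE_REG_SIZE	0x1000
    #define EXAMPLE_ID_REG	0x00		/* hypothetical register offset */

    static int example_probe(void)
    {
    	void __iomem *regs;
    	u32 id;

    	regs = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_REG_SIZE);
    	if (!regs)
    		return -ENOMEM;

    	id = readl(regs + EXAMPLE_ID_REG);	/* read a hypothetical ID register */

    	iounmap(regs);
    	return id ? 0 : -ENODEV;
    }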

arch/csky/include/asm/tlb.h (new file, 25 lines)
@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_TLB_H
#define __ASM_CSKY_TLB_H

#include <asm/cacheflush.h>

#define tlb_start_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm) \
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)

#define tlb_end_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm) \
			flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)

#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#endif /* __ASM_CSKY_TLB_H */
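
The three macros above are consumed by the generic mmu_gather machinery pulled in from asm-generic/tlb.h. A simplified sketch (not the actual mm/memory.c code) of where they fire while a VMA is being unmapped:

    #include <asm/tlb.h>

    /* Simplified outline; the real caller of these hooks is generic mm code. */
    static void example_unmap_vma(struct mmu_gather *tlb,
    			      struct vm_area_struct *vma,
    			      unsigned long start, unsigned long end)
    {
    	tlb_start_vma(tlb, vma);	/* csky: flush_cache_range() unless tlb->fullmm */

    	/* ... page table entries for [start, end) are cleared here ... */

    	tlb_end_vma(tlb, vma);		/* csky: flush_tlb_range() unless tlb->fullmm */
    }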

arch/csky/include/asm/tlbflush.h (new file, 25 lines)
@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void flush_tlb_one(unsigned long vaddr);

#endif
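
For reference only (not part of the patch), a hypothetical helper showing the usual shape of a single-page flush described in the comment above: the PTE is updated first, then the stale entry for that address is dropped with flush_tlb_page():

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    /* Hypothetical sketch: install a new user PTE, then drop the stale TLB entry. */
    static void example_update_pte(struct vm_area_struct *vma, unsigned long addr,
    			       pte_t *ptep, pte_t newpte)
    {
    	set_pte(ptep, newpte);		/* write the new translation to the page table */
    	flush_tlb_page(vma, addr);	/* invalidate any cached translation for addr */
    }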