Merge tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux
Pull csky updates from Guo Ren:
 "This round of csky subsystem just some fixups:

   - Fix mb() synchronization problem
   - Fix dma_alloc_coherent with PAGE_SO attribute
   - Fix cache_op failed when cross memory ZONEs
   - Optimize arch_sync_dma_for_cpu/device with dma_inv_range
   - Fix ioremap function losing
   - Fix arch_get_unmapped_area() implementation
   - Fix defer cache flush for 610
   - Support kernel non-aligned access
   - Fix 610 vipt cache flush mechanism
   - Fix add zero_fp fixup perf backtrace panic
   - Move static keyword to the front of declaration
   - Fix csky_pmu.max_period assignment
   - Use generic free_initrd_mem()
   - entry: Remove unneeded need_resched() loop"

* tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux:
  csky: Move static keyword to the front of declaration
  csky: entry: Remove unneeded need_resched() loop
  csky: Fixup csky_pmu.max_period assignment
  csky: Fixup add zero_fp fixup perf backtrace panic
  csky: Use generic free_initrd_mem()
  csky: Fixup 610 vipt cache flush mechanism
  csky: Support kernel non-aligned access
  csky: Fixup defer cache flush for 610
  csky: Fixup arch_get_unmapped_area() implementation
  csky: Fixup ioremap function losing
  csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range
  csky/dma: Fixup cache_op failed when cross memory ZONEs
  csky: Fixup dma_alloc_coherent with PAGE_SO attribute
  csky: Fixup mb() synchronization problem
--- a/arch/csky/include/asm/barrier.h
+++ b/arch/csky/include/asm/barrier.h
@@ -9,11 +9,12 @@
 #define nop()	asm volatile ("nop\n":::"memory")
 
 /*
- * sync:        completion barrier
- * sync.s:      completion barrier and shareable to other cores
- * sync.i:      completion barrier with flush cpu pipeline
- * sync.is:     completion barrier with flush cpu pipeline and shareable to
- *              other cores
+ * sync:        completion barrier, all sync.xx instructions
+ *              guarantee the last response received by bus transaction
+ *              made by ld/st instructions before sync.s
+ * sync.s:      inherit from sync, but also shareable to other cores
+ * sync.i:      inherit from sync, but also flush cpu pipeline
+ * sync.is:     the same with sync.i + sync.s
  *
  * bar.brwarw:  ordering barrier for all load/store instructions before it
  * bar.brwarws: ordering barrier for all load/store instructions before it
@@ -27,9 +28,7 @@
  */
 
 #ifdef CONFIG_CPU_HAS_CACHEV2
-#define mb()		asm volatile ("bar.brwarw\n":::"memory")
-#define rmb()		asm volatile ("bar.brar\n":::"memory")
-#define wmb()		asm volatile ("bar.bwaw\n":::"memory")
+#define mb()		asm volatile ("sync.s\n":::"memory")
 
 #ifdef CONFIG_SMP
 #define __smp_mb()	asm volatile ("bar.brwarws\n":::"memory")
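With this hunk, mb() on CACHEV2 parts becomes the sync.s completion barrier described in the new comment instead of the ordering-only bar.brwarw, and the bar-based rmb()/wmb() definitions go away in favour of the asm-generic fallbacks. A minimal sketch of the kind of driver pattern that relies on a full mb(); the descriptor layout, doorbell register and function names below are hypothetical, not part of this patch:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct fake_desc {			/* hypothetical DMA descriptor layout */
	u32 addr;
	u32 len;
};

static void fake_kick_device(struct fake_desc *desc,
			     void __iomem *doorbell, u32 addr, u32 len)
{
	desc->addr = addr;
	desc->len  = len;

	/*
	 * The descriptor stores must have completed, not merely been
	 * ordered, before the device reacts to the doorbell write.
	 */
	mb();

	writel(1, doorbell);
}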
--- a/arch/csky/include/asm/cache.h
+++ b/arch/csky/include/asm/cache.h
@@ -24,6 +24,7 @@ void cache_wbinv_range(unsigned long start, unsigned long end);
 void cache_wbinv_all(void);
 
 void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_inv_range(unsigned long start, unsigned long end);
 void dma_wb_range(unsigned long start, unsigned long end);
 
 #endif
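The new dma_inv_range() lets the streaming-DMA sync path invalidate a range without writing it back first, which is all that is needed when a device has written the buffer and the CPU is about to read it. A rough sketch of how the two arch sync hooks can dispatch on transfer direction; the cache_op() range walker is assumed here (declared as an extern purely for illustration) and the function names are not the kernel's:

#include <linux/dma-direction.h>
#include <linux/types.h>
#include <asm/cache.h>

/* Assumed helper: walks the physical range and applies fn to each piece. */
extern void cache_op(phys_addr_t paddr, size_t size,
		     void (*fn)(unsigned long start, unsigned long end));

void sketch_sync_for_device(phys_addr_t paddr, size_t size,
			    enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		cache_op(paddr, size, dma_wb_range);	/* write back only */
	else
		cache_op(paddr, size, dma_wbinv_range);	/* write back + invalidate */
}

void sketch_sync_for_cpu(phys_addr_t paddr, size_t size,
			 enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		return;
	/* Device wrote the buffer: stale lines only need invalidation. */
	cache_op(paddr, size, dma_inv_range);
}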
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -4,17 +4,10 @@
 #ifndef __ASM_CSKY_IO_H
 #define __ASM_CSKY_IO_H
 
-#include <abi/pgtable-bits.h>
+#include <asm/pgtable.h>
 #include <linux/types.h>
 #include <linux/version.h>
 
-extern void __iomem *ioremap(phys_addr_t offset, size_t size);
-
-extern void iounmap(void *addr);
-
-extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
-		size_t size, unsigned long flags);
-
 /*
  * I/O memory access primitives. Reads are ordered relative to any
  * following Normal memory access. Writes are ordered relative to any prior
@@ -40,9 +33,17 @@ extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 #define writel(v,c)		({ wmb(); writel_relaxed((v),(c)); mb(); })
 #endif
 
-#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+/*
+ * I/O memory mapping functions.
+ */
+extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
+extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
+extern void iounmap(void *addr);
+
+#define ioremap(addr, size)		__ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
+#define ioremap_wc(addr, size)		__ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
+#define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_cache			ioremap_cache
 
 #include <asm-generic/io.h>
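After this rework ioremap() is a wrapper around __ioremap() with pgprot_noncached(PAGE_KERNEL), ioremap_wc() uses pgprot_writecombine(PAGE_KERNEL), and the remaining accessors come from asm-generic/io.h, so ordinary driver usage is unchanged. A small usage sketch; the register offsets and probe function are made up:

#include <linux/io.h>
#include <linux/errno.h>

#define FAKE_REG_CTRL	0x00	/* hypothetical register offsets */
#define FAKE_REG_STAT	0x04

static int fake_probe_regs(phys_addr_t base, size_t size)
{
	void __iomem *regs;

	regs = ioremap(base, size);	/* uncached, strongly ordered on csky */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + FAKE_REG_CTRL);		/* enable the block */
	if (!(readl(regs + FAKE_REG_STAT) & 0x1)) {	/* did it come up? */
		iounmap(regs);
		return -EIO;
	}

	iounmap(regs);
	return 0;
}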
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -258,6 +258,16 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
 {
 	unsigned long prot = pgprot_val(_prot);
 
+	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
+
+	return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
 	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
 
 	return __pgprot(prot);
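pgprot_noncached() now ORs in _PAGE_SO as well, so the default uncached mapping is also strongly ordered, while the new pgprot_writecombine() gives an uncached mapping without the strong-order bit. A sketch of where that distinction typically shows up, a hypothetical driver mmap() handler; the pfn and the framebuffer/register split are invented for illustration:

#include <linux/fs.h>
#include <linux/mm.h>

static int fake_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x10000;	/* made-up physical frame number */
	bool is_framebuffer = true;	/* made-up selection logic */

	/*
	 * A framebuffer-like region tolerates write-combining; control
	 * registers want the strongly ordered uncached mapping that
	 * pgprot_noncached() now produces (uncached + _PAGE_SO on csky).
	 */
	vma->vm_page_prot = is_framebuffer ?
		pgprot_writecombine(vma->vm_page_prot) :
		pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}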