Merge tag 'arc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC architecture updates from Vineet Gupta:
 - Big Endian io accessors fix [Lada]
 - Spello fixes [Adam]
 - Fix for DW GMAC breakage [Alexey]
 - Making DMA API 64-bit ready
 - Shutting up -Wmaybe-uninitialized noise for ARC
 - Other minor fixes here and there, comment updates

* tag 'arc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (21 commits)
  ARCv2: ioremap: Support dynamic peripheral address space
  ARC: dma: reintroduce platform specific dma<->phys
  ARC: dma: ioremap: use phys_addr_t consistenctly in code paths
  ARC: dma: pass_phys() not sg_virt() to cache ops
  ARC: dma: non-coherent pages need V-P mapping if in HIGHMEM
  ARC: dma: Use struct page based page allocator helpers
  ARC: build: Turn off -Wmaybe-uninitialized for ARC gcc 4.8
  ARC: [plat-axs10x] add Ethernet PHY description in .dts
  arc: use of_platform_default_populate() to populate default bus
  ARC: thp: unbork !CONFIG_TRANSPARENT_HUGEPAGE build
  arc: [plat-nsimosci*] use ezchip network driver
  ARCv2: LLSC: software backoff is NOT needed starting HS2.1c
  ARC: mm: Use virt_to_pfn() for addr >> PAGE_SHIFT pattern
  ARC: [plat-nsim] document ranges
  ARC: build: Better way to detect ISA compatible toolchain
  ARCv2: Allow enabling PAE40 w/o HIGHMEM
  ARC: [BE] readl()/writel() to work in Big Endian CPU configuration
  ARC: [*defconfig] No need to specify CONFIG_CROSS_COMPILE
  ARC: [BE] Select correct CROSS_COMPILE prefix
  ARC: bitops: Remove non relevant comments
  ...
Linus Torvalds, 2016-03-21 13:00:46 -07:00
38 changed files with 200 additions and 154 deletions

arch/arc/include/asm/arcregs.h

@@ -381,12 +381,6 @@ static inline int is_isa_arcompact(void)
 	return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
 }
 
-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
-#error "Toolchain not configured for ARCompact builds"
-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
-#error "Toolchain not configured for ARCv2 builds"
-#endif
-
 #endif /* __ASEMBLY__ */
 
 #endif /* _ASM_ARC_ARCREGS_H */

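The removed check leaned on the ARC gcc pre-defining _CPU_DEFAULT_A7 (ARCompact) or _CPU_DEFAULT_HS (ARCv2) for the CPU it was configured for; per the shortlog, "ARC: build: Better way to detect ISA compatible toolchain" moves the detection into the build system instead. A minimal stand-alone sketch of the macro mechanism, useful for sanity-checking a toolchain by hand (plain C, not kernel code; the file name and build line are illustrative):

/* isa-probe.c: report which ARC ISA this compiler targets, using the
 * same predefined macros the deleted check tested. Hypothetical use:
 *   arc-linux-gcc -S -o /dev/null isa-probe.c
 */
#if defined(_CPU_DEFAULT_A7)
#pragma message "toolchain targets ARCompact (ARC700)"
#elif defined(_CPU_DEFAULT_HS)
#pragma message "toolchain targets ARCv2 (ARC HS)"
#else
#pragma message "not an ARC toolchain, or one without these built-ins"
#endif
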
arch/arc/include/asm/bitops.h

@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
 									\
 	m += nr >> 5;							\
 									\
-	/*								\
-	 * ARC ISA micro-optimization:					\
-	 *								\
-	 * Instructions dealing with bitpos only consider lower 5 bits	\
-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
-	 *  (mem pointer still needs adjustment to point to next word)	\
-	 *								\
-	 * Hence the masking to clamp @nr arg can be elided in general.	\
-	 *								\
-	 * However if @nr is a constant (above assumed in a register),	\
-	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
-	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
-	 * done for const @nr, but no code is generated due to gcc	\
-	 * const prop.							\
-	 */								\
 	nr &= 0x1f;							\
 									\
 	__asm__ __volatile__(						\

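The deleted comment still carries the reasoning behind the nr &= 0x1f that survives it: the ARC shifter only looks at the low 5 bits of a bit position, but C makes a 33-bit shift of a 32-bit value undefined, so a constant nr > 31 would be const-propagated to 0 without the mask. A minimal userspace sketch of that point (plain C, not the kernel macro; set_bit32() is an illustrative stand-in):

#include <stdio.h>

/* Clamp the bit position exactly as the kernel macro does: for a
 * register operand the hardware would wrap anyway, but for a constant
 * the compiler folds the shift at build time under C rules, so the
 * mask is what keeps (nr = 33) behaving like (nr = 1).
 */
static unsigned int set_bit32(unsigned int word, unsigned int nr)
{
	nr &= 0x1f;
	return word | (1u << nr);
}

int main(void)
{
	printf("%#x\n", set_bit32(0, 33));	/* prints 0x2: bit 33 wraps to bit 1 */
	return 0;
}
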
arch/arc/include/asm/cache.h

@@ -54,6 +54,7 @@ extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 extern int ioc_exists;
+extern unsigned long perip_base;
 
 #endif	/* !__ASSEMBLY__ */

arch/arc/include/asm/cacheflush.h

@@ -40,9 +40,9 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 void flush_dcache_page(struct page *page);
 
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)

arch/arc/include/asm/cmpxchg.h

@@ -149,7 +149,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  * Since xchg() doesn't always do that, it would seem that following defintion
  * is incorrect. But here's the rationale:
  *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
- *  LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC
+ *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
  *        is natively "SMP safe", no serialization required).
  *  UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
  *        could clobber them. atomic_xchg() itself would be 1 insn, so it

arch/arc/include/asm/dma-mapping.h

@@ -11,6 +11,13 @@
 #ifndef ASM_ARC_DMA_MAPPING_H
 #define ASM_ARC_DMA_MAPPING_H
 
+#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
+#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
+#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
+#else
+#include <plat/dma.h>
+#endif
+
 extern struct dma_map_ops arc_dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)

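The default plat_dma_to_phys()/plat_phys_to_dma() added above are identity casts; a platform that selects CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA supplies its own pair via plat/dma.h. A hypothetical sketch for hardware whose bus masters see memory at a fixed offset from the CPU's physical view (the offset value and the PLAT_DMA_OFFSET name are invented for illustration):

/* Hypothetical <plat/dma.h>: peripherals address DRAM 0x2000_0000
 * below where the CPU sees it; nothing here is from a real platform.
 */
#define PLAT_DMA_OFFSET		0x20000000UL

#define plat_dma_to_phys(dev, dma_handle)	\
	((phys_addr_t)((dma_handle) + PLAT_DMA_OFFSET))
#define plat_phys_to_dma(dev, paddr)		\
	((dma_addr_t)((paddr) - PLAT_DMA_OFFSET))
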
arch/arc/include/asm/entry-compact.h

@@ -231,7 +231,7 @@
 	/* free up r9 as scratchpad */
 	PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
 
-	/* Which mode (user/kernel) was the system in when intr occured */
+	/* Which mode (user/kernel) was the system in when intr occurred */
 	lr  r9, [status32_l\LVL\()]
 
 	SWITCH_TO_KERNEL_STK

arch/arc/include/asm/io.h

@@ -13,8 +13,8 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -138,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 
 /*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
  */
 #define readb_relaxed(c)	__raw_readb(c)
-#define readw_relaxed(c)	__raw_readw(c)
-#define readl_relaxed(c)	__raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)	\
+					__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)	\
+					__raw_readl(c)); __r; })
 
 #define writeb_relaxed(v,c)	__raw_writeb(v,c)
-#define writew_relaxed(v,c)	__raw_writew(v,c)
-#define writel_relaxed(v,c)	__raw_writel(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
 
 #include <asm-generic/io.h>

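The swap is needed because __raw_readl() is a plain CPU load: a big-endian core reading a little-endian device register sees the bytes reversed unless le32_to_cpu() folds them back. A small userspace sketch of that failure mode (standard C; the register value is made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* a device register holding 0x12345678, stored little endian */
	uint8_t reg[4] = { 0x78, 0x56, 0x34, 0x12 };
	uint32_t raw;

	memcpy(&raw, reg, sizeof(raw));	/* what a raw CPU load returns */

	/* On a little-endian host this prints 0x12345678; on a
	 * big-endian one it prints 0x78563412, which is exactly the
	 * byte swap le32_to_cpu() undoes in readl_relaxed() above.
	 */
	printf("raw load: 0x%08x\n", (unsigned int)raw);
	return 0;
}
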
arch/arc/include/asm/page.h

@@ -10,7 +10,6 @@
 #include <uapi/asm/page.h>
 
 #ifndef __ASSEMBLY__
 
 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
@@ -76,30 +75,26 @@ typedef unsigned long pgprot_t;
 typedef pte_t * pgtable_t;
 
-#define ARCH_PFN_OFFSET		(CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 
-#define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define ARCH_PFN_OFFSET		virt_to_pfn(CONFIG_LINUX_LINK_BASE)
+
+#define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
  *
  * These macros have historically been misnamed
  * virt here means link-address/program-address as embedded in object code.
- * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
- * 128th page, and virt_to_page( ) will return the struct page corresp to it.
- * mem_map[ ] is an array of struct page for each page frame in the system
- *
- * Independent of where linux is linked at, link-addr = physical address
- * So the old macro  __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
- * would have been wrong in case kernel is not at 0x8zs
+ * And for ARC, link-addr = physical address
  */
 #define __pa(vaddr)		((unsigned long)vaddr)
 #define __va(paddr)		((void *)((unsigned long)(paddr)))
 
 #define virt_to_page(kaddr)	\
-	(mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+	(mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
 
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)

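A worked example of the reworked helpers, assuming the default ARC page size of 8K (PAGE_SHIFT == 13) and the usual 0x8000_0000 link base; these are plain C stand-ins for the macros above, not the kernel definitions:

#define PAGE_SHIFT		13
#define LINUX_LINK_BASE		0x80000000UL

/* __pa() is an identity mapping on ARC, so virt_to_pfn() is a shift */
#define virt_to_pfn(kaddr)	((unsigned long)(kaddr) >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		virt_to_pfn(LINUX_LINK_BASE)

/* 0x8010_0000 is the 128th page of the image, as the old comment said:
 *   virt_to_pfn(0x80100000) - ARCH_PFN_OFFSET
 * = 0x40080 - 0x40000 = 0x80 = 128
 */
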
arch/arc/include/asm/pgtable.h

@@ -12,7 +12,7 @@
  * - Utilise some unused free bits to confine PTE flags to 12 bits
  *   This is a must for 4k pg-sz
  *
- * vineetg: Mar 2011 - changes to accomodate MMU TLB Page Descriptor mods
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
  *  -TLB Locking never really existed, except for initial specs
  *  -SILENT_xxx not needed for our port
  *  -Per my request, MMU V3 changes the layout of some of the bits
@@ -278,15 +278,14 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)			(pmd_val(x))
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(x) (mem_map + \
-		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
-				PAGE_SHIFT)))
+#define pte_page(pte)	\
+	(mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
 
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte)		virt_to_pfn(pte_val(pte))
 #define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
 				 pgprot_val(prot)))
-#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define __pte_index(addr)	(virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)

arch/arc/include/asm/tlbflush.h

@@ -17,8 +17,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			       unsigned long end);
+#endif
 
 #ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
@@ -26,7 +28,9 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()			local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
+#endif
 #else
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
@@ -34,7 +38,8 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#endif
 #endif /* CONFIG_SMP */
 
 #endif