xtensa: nommu support

Add support for !CONFIG_MMU setups.

Signed-off-by: Johannes Weiner <jw@emlix.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
Author: Johannes Weiner
Date: 2009-03-04 16:21:31 +01:00
Committed by: Chris Zankel
Commit: e5083a63b6
Parent: 7789f89af9

19 changed files with 169 additions and 75 deletions

arch/xtensa/include/asm/cacheflush.h

@@ -65,13 +65,17 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 # define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
 #endif
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
 extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
+						unsigned long phys) { }
 #endif
-#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
 extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
 #else
-# define __invalidate_icache_page_alias(v,p) do { } while(0)
+static inline void __invalidate_icache_page_alias(unsigned long virt,
+						unsigned long phys) { }
 #endif
 /*

arch/xtensa/include/asm/dma.h

@@ -44,8 +44,9 @@
  * the value desired).
  */
+#ifndef MAX_DMA_ADDRESS
 #define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
+#endif
 /* Reserve and release a DMA channel */
 extern int request_dma(unsigned int dmanr, const char * device_id);
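The #ifndef turns the KIO-based value into a fallback: any definition of MAX_DMA_ADDRESS made earlier in the include chain, typically by a platform header, now takes precedence. A hypothetical override, purely for illustration (the header and value are not part of this patch):

	/* e.g. in a platform header pulled in ahead of <asm/dma.h>: */
	#define MAX_DMA_ADDRESS	(PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)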

arch/xtensa/include/asm/io.h

@@ -69,21 +69,28 @@ static inline void * phys_to_virt(unsigned long address)
 static inline void *ioremap(unsigned long offset, unsigned long size)
 {
+#ifdef CONFIG_MMU
 	if (offset >= XCHAL_KIO_PADDR
 	    && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
 	else
 		BUG();
+#else
+	return (void *)offset;
+#endif
 }
 static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
 {
+#ifdef CONFIG_MMU
 	if (offset >= XCHAL_KIO_PADDR
 	    && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
 	else
 		BUG();
+#else
+	return (void *)offset;
+#endif
 }
 static inline void iounmap(void *addr)
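On a !CONFIG_MMU kernel both helpers therefore degenerate to an identity mapping: the cookie handed back to the driver is simply the physical address. A minimal sketch of what a caller sees (the device address is hypothetical, for illustration only):

	#include <asm/io.h>

	#define DEV_PADDR	0xfd030000UL	/* hypothetical MMIO base */

	static void map_example(void)
	{
		void *regs = ioremap(DEV_PADDR, 0x1000);

		/* !CONFIG_MMU: regs == (void *)DEV_PADDR, no translation at all.
		 * CONFIG_MMU: DEV_PADDR must lie in the XCHAL_KIO window or the
		 * BUG() above fires. */
		iounmap(regs);
	}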

arch/xtensa/include/asm/mmu.h

@@ -11,7 +11,12 @@
 #ifndef _XTENSA_MMU_H
 #define _XTENSA_MMU_H
+#ifndef CONFIG_MMU
+#include <asm/nommu.h>
+#else
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_t;
+#endif /* CONFIG_MMU */
 #endif /* _XTENSA_MMU_H */

arch/xtensa/include/asm/mmu_context.h

@@ -13,6 +13,10 @@
 #ifndef _XTENSA_MMU_CONTEXT_H
 #define _XTENSA_MMU_CONTEXT_H
+#ifndef CONFIG_MMU
+#include <asm/nommu_context.h>
+#else
 #include <linux/stringify.h>
 #include <linux/sched.h>
@@ -133,4 +137,5 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 }
+#endif /* CONFIG_MMU */
 #endif /* _XTENSA_MMU_CONTEXT_H */

arch/xtensa/include/asm/nommu.h

@@ -0,0 +1,3 @@
+typedef struct {
+	unsigned long end_brk;
+} mm_context_t;
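The end_brk member is the one piece of per-mm state the generic nommu code expects the architecture to provide: the brk() handler in mm/nommu.c uses it to bound heap growth. Roughly, paraphrased from the generic code (not part of this patch):

	/* mm/nommu.c, sys_brk(): refuse to grow the heap past the limit
	 * recorded in the architecture's mm->context.end_brk. */
	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;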

arch/xtensa/include/asm/nommu_context.h

@@ -0,0 +1,25 @@
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	return 0;
+}
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+}
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+}
+static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
+{
+}
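These hooks are required because the generic fork and scheduler paths call them unconditionally; with a single flat address space there is nothing to allocate, switch, or tear down, so empty bodies suffice. For orientation, the scheduler's call site looks roughly like this (paraphrased from kernel/sched.c, not part of this patch):

	if (unlikely(!next->mm))
		enter_lazy_tlb(oldmm, next);	/* kernel thread keeps borrowing oldmm */
	else
		switch_mm(oldmm, next->mm, next);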

arch/xtensa/include/asm/page.h

@@ -33,8 +33,14 @@
 #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
+#ifdef CONFIG_MMU
 #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
 #define MAX_MEM_PFN XCHAL_KSEG_SIZE
+#else
+#define PAGE_OFFSET 0
+#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#endif
 #define PGTABLE_START 0x80000000
 /*
@@ -165,8 +171,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#ifdef CONFIG_MMU
 #define WANT_PAGE_VIRTUAL
+#endif
 #endif /* __ASSEMBLY__ */
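With PAGE_OFFSET defined as 0, the header's existing PAGE_OFFSET-based conversions collapse to the identity, so kernel virtual and physical addresses coincide on !CONFIG_MMU:

	/* unchanged by this patch: */
	#define __pa(x)	((unsigned long) (x) - PAGE_OFFSET)	/* == (unsigned long)(x) when PAGE_OFFSET is 0 */
	#define __va(x)	((void *)((unsigned long) (x) + PAGE_OFFSET))	/* == (void *)(x) */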

arch/xtensa/include/asm/pgtable.h

@@ -183,7 +183,15 @@ extern unsigned long empty_zero_page[1024];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#ifdef CONFIG_MMU
 extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
+extern void paging_init(void);
+extern void pgtable_cache_init(void);
+#else
+# define swapper_pg_dir NULL
+static inline void paging_init(void) { }
+static inline void pgtable_cache_init(void) { }
+#endif
 /*
  * The pmd contains the kernel virtual address of the pte page.
@@ -383,8 +391,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #else
-extern void paging_init(void);
 #define kern_addr_valid(addr) (1)
 extern void update_mmu_cache(struct vm_area_struct * vma,
@@ -398,9 +404,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
 	remap_pfn_range(vma, from, pfn, size, prot)
-extern void pgtable_cache_init(void);
 typedef pte_t *pte_addr_t;
 #endif /* !defined (__ASSEMBLY__) */

arch/xtensa/include/asm/processor.h

@@ -13,6 +13,7 @@
 #include <variant/core.h>
 #include <asm/coprocessor.h>
+#include <platform/hardware.h>
 #include <linux/compiler.h>
 #include <asm/ptrace.h>
@@ -35,7 +36,12 @@
  * the 1 GB requirement applies to the stack as well.
  */
+#ifdef CONFIG_MMU
 #define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
+#else
+#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#endif
 #define STACK_TOP TASK_SIZE
 #define STACK_TOP_MAX STACK_TOP
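Without an MMU the user address space is simply bounded by the end of platform RAM, and STACK_TOP follows it. A worked example with hypothetical values (a real platform supplies them, e.g. via <platform/hardware.h>):

	#define PLATFORM_DEFAULT_MEM_START	0x60000000	/* hypothetical */
	#define PLATFORM_DEFAULT_MEM_SIZE	0x04000000	/* hypothetical, 64 MiB */

	/* TASK_SIZE = 0x60000000 + 0x04000000 = 0x64000000; user mappings
	 * and the initial stack must fit below the end of physical memory. */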