Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (110 commits)
  sh: i2c-sh7760: Replase from ctrl_* to __raw_*
  sh: clkfwk: Shuffle around to match the intc split up.
  sh: clkfwk: modify for_each_frequency end condition
  sh: fix clk_get() error handling
  sh: clkfwk: Fix fault in frequency iterator.
  sh: clkfwk: Add a helper for rate rounding by divisor ranges.
  sh: clkfwk: Abstract rate rounding helper.
  sh: clkfwk: support clock remapping.
  sh: pci: Convert to upper/lower_32_bits() helpers.
  sh: mach-sdk7786: Add support for the FPGA SRAM.
  sh: Provide a generic SRAM pool for tiny memories.
  sh: pci: Support secondary FPGA-driven PCIe clocks on SDK7786.
  sh: pci: Support slot 4 routing on SDK7786.
  sh: Fix up PMB locking.
  sh: mach-sdk7786: Add support for fpga gpios.
  sh: use pr_fmt for clock framework, too.
  sh: remove name and id from struct clk
  sh: free-without-alloc fix for sh_mobile_lcdcfb
  sh: perf: Set up perf_max_events.
  sh: perf: Support SH-X3 hardware counters.
  ...

Fix up trivial conflicts (perf_max_events got removed) in arch/sh/kernel/perf_event.c
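One note on the first shortlog entry: the i2c-sh7760 change is part of the ongoing move from the SH-specific ctrl_inl()/ctrl_outl() I/O accessors to the generic __raw_readl()/__raw_writel() helpers. A before/after sketch of what such a conversion looks like in a driver; the register offset and function names are illustrative, not taken from i2c-sh7760:

#include <linux/io.h>

#define EXAMPLE_STATUS_REG	0x10	/* hypothetical register offset */

/* Before: SH-only accessor taking a plain unsigned long address. */
static u32 example_read_status_old(unsigned long base)
{
	return ctrl_inl(base + EXAMPLE_STATUS_REG);
}

/* After: generic raw MMIO accessor taking a void __iomem pointer. */
static u32 example_read_status_new(void __iomem *base)
{
	return __raw_readl(base + EXAMPLE_STATUS_REG);
}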
arch/sh/mm/Kconfig
@@ -168,6 +168,10 @@ config IOREMAP_FIXED
 config UNCACHED_MAPPING
 	bool
 
+config HAVE_SRAM_POOL
+	bool
+	select GENERIC_ALLOCATOR
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
arch/sh/mm/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_PMB) += pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
+obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
@@ -66,4 +67,4 @@ CFLAGS_fault_64.o += -ffixed-r7 \
 		     -ffixed-r60 -ffixed-r61 -ffixed-r62 \
 		     -fomit-frame-pointer
 
-EXTRA_CFLAGS += -Werror
+ccflags-y := -Werror
arch/sh/mm/asids-debugfs.c
@@ -63,7 +63,7 @@ static int __init asids_debugfs_init(void)
 {
 	struct dentry *asids_dentry;
 
-	asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root,
+	asids_dentry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
 					   NULL, &asids_debugfs_fops);
 	if (!asids_dentry)
 		return -ENOMEM;
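This and the following debugfs hunks move SH's entries from the architecture-private sh_debugfs_root to the generic arch_debugfs_dir, and drop the extra IS_ERR() checks along the way. For orientation, a minimal sketch of how an architecture typically provides arch_debugfs_dir, modelled on the x86 kdebugfs.c pattern; the file placement and the "sh" directory name are assumptions, not quoted from this merge:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/init.h>

/* Single dentry shared by all arch-specific debugfs users. */
struct dentry *arch_debugfs_dir;
EXPORT_SYMBOL(arch_debugfs_dir);

static int __init arch_kdebugfs_init(void)
{
	/* Everything registered against it lands under /sys/kernel/debug/sh/. */
	arch_debugfs_dir = debugfs_create_dir("sh", NULL);
	if (!arch_debugfs_dir)
		return -ENOMEM;

	return 0;
}
arch_initcall(arch_kdebugfs_init);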
arch/sh/mm/cache-debugfs.c
@@ -126,25 +126,19 @@ static int __init cache_debugfs_init(void)
 {
 	struct dentry *dcache_dentry, *icache_dentry;
 
-	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
+	dcache_dentry = debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_DCACHE,
 					    &cache_debugfs_fops);
 	if (!dcache_dentry)
 		return -ENOMEM;
-	if (IS_ERR(dcache_dentry))
-		return PTR_ERR(dcache_dentry);
 
-	icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
+	icache_dentry = debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
 					    (unsigned int *)CACHE_TYPE_ICACHE,
 					    &cache_debugfs_fops);
 	if (!icache_dentry) {
 		debugfs_remove(dcache_dentry);
 		return -ENOMEM;
 	}
-	if (IS_ERR(icache_dentry)) {
-		debugfs_remove(dcache_dentry);
-		return PTR_ERR(icache_dentry);
-	}
 
 	return 0;
 }
arch/sh/mm/consistent.c
@@ -38,11 +38,12 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
+	gfp |= __GFP_ZERO;
+
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
 
-	memset(ret, 0, size);
 	/*
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
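With __GFP_ZERO in the allocation mask the page allocator hands back already-zeroed pages, so the explicit memset() goes away; the cache flush that follows in this function is still needed because the zeroed lines can sit in the data cache while the buffer is later accessed through an uncached mapping. For context, a sketch of the driver-side call that ends up here on SH; the function name, device pointer, and size are hypothetical:

#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *my_dev, dma_addr_t *dma_handle)
{
	/* On SH this is ultimately serviced by dma_generic_alloc_coherent(). */
	void *ring = dma_alloc_coherent(my_dev, PAGE_SIZE, dma_handle,
					GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/*
	 * ring is zero-filled and mapped uncached; *dma_handle is the
	 * bus address to program into the device.
	 */
	return 0;
}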
arch/sh/mm/init.c
@@ -47,7 +47,6 @@ static pte_t *__get_pte_phys(unsigned long addr)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
@@ -67,8 +66,7 @@ static pte_t *__get_pte_phys(unsigned long addr)
 		return NULL;
 	}
 
-	pte = pte_offset_kernel(pmd, addr);
-	return pte;
+	return pte_offset_kernel(pmd, addr);
 }
 
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -125,13 +123,45 @@ void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
 	clear_pte_phys(address, prot);
 }
 
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+	if (pud_none(*pud)) {
+		pmd_t *pmd;
+
+		pmd = alloc_bootmem_pages(PAGE_SIZE);
+		pud_populate(&init_mm, pud, pmd);
+		BUG_ON(pmd != pmd_offset(pud, 0));
+	}
+
+	return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte;
+
+		pte = alloc_bootmem_pages(PAGE_SIZE);
+		pmd_populate_kernel(&init_mm, pmd, pte);
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					    unsigned long vaddr, pte_t *lastpte)
+{
+	return pte;
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 					 pgd_t *pgd_base)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	int i, j, k;
 	unsigned long vaddr;
@@ -144,19 +174,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-#ifdef __PAGETABLE_PMD_FOLDED
-			pmd = (pmd_t *)pud;
-#else
-			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			pud_populate(&init_mm, pud, pmd);
+			pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
 			pmd += k;
 #endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
-				if (pmd_none(*pmd)) {
-					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-					pmd_populate_kernel(&init_mm, pmd, pte);
-					BUG_ON(pte != pte_offset_kernel(pmd, 0));
-				}
+				pte = page_table_kmap_check(one_page_table_init(pmd),
+							    pmd, vaddr, pte);
 				vaddr += PMD_SIZE;
 			}
 			k = 0;
arch/sh/mm/nommu.c
@@ -67,6 +67,10 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	BUG();
 }
 
+void __flush_tlb_global(void)
+{
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			return PTR_ERR(pmbe);
 		}
 
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = pmb_sizes[i].size;
 
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		 * entries for easier tear-down.
 		 */
 		if (likely(pmbp)) {
-			spin_lock(&pmbp->lock);
+			raw_spin_lock_nested(&pmbp->lock,
+					     SINGLE_DEPTH_NESTING);
 			pmbp->link = pmbe;
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		i--;
 		mapped++;
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 	} while (size >= SZ_16M);
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
 
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);
@@ -866,11 +865,9 @@ static int __init pmb_debugfs_init(void)
 	struct dentry *dentry;
 
 	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
-				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
+				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
 	if (!dentry)
 		return -ENOMEM;
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
 
 	return 0;
 }
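The "Fix up PMB locking" change converts the per-entry lock to raw_spinlock_t (the raw variant always stays a spinning lock, which suits code that manipulates the MMU's privileged mapping buffer) and uses raw_spin_lock_nested() with SINGLE_DEPTH_NESTING where two entries are locked at once, so lockdep accepts holding two locks of the same lock class. A small self-contained sketch of that nesting pattern; the struct and function names here are illustrative, not from pmb.c:

#include <linux/spinlock.h>

struct chained_entry {
	raw_spinlock_t lock;
	struct chained_entry *link;
};

/*
 * Link "prev" to "next" while holding both entry locks. Without the
 * _nested annotation, lockdep would flag taking two locks of the same
 * class as a potential deadlock.
 */
static void chain_entries(struct chained_entry *prev,
			  struct chained_entry *next)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&next->lock, flags);
	raw_spin_lock_nested(&prev->lock, SINGLE_DEPTH_NESTING);

	prev->link = next;

	raw_spin_unlock(&prev->lock);
	raw_spin_unlock_irqrestore(&next->lock, flags);
}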
arch/sh/mm/sram.c (new file, 34 lines)
@@ -0,0 +1,34 @@
+/*
+ * SRAM pool for tiny memories not otherwise managed.
+ *
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/sram.h>
+
+/*
+ * This provides a standard SRAM pool for tiny memories that can be
+ * added either by the CPU or the platform code. Typical SRAM sizes
+ * to be inserted in to the pool will generally be less than the page
+ * size, with anything more reasonably sized handled as a NUMA memory
+ * node.
+ */
+struct gen_pool *sram_pool;
+
+static int __init sram_pool_init(void)
+{
+	/*
+	 * This is a global pool, we don't care about node locality.
+	 */
+	sram_pool = gen_pool_create(1, -1);
+	if (unlikely(!sram_pool))
+		return -ENOMEM;
+
+	return 0;
+}
+core_initcall(sram_pool_init);
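The pool is backed by lib/genalloc.c, which is why the Kconfig hunk above has HAVE_SRAM_POOL select GENERIC_ALLOCATOR; gen_pool_create(1, -1) asks for 2-byte allocation granularity with no NUMA node preference. A rough sketch of how board code might donate a small on-chip SRAM to this pool and how a consumer might carve a buffer out of it; the address, sizes, and function names are made up for illustration:

#include <linux/genalloc.h>
#include <linux/io.h>
#include <asm/sram.h>

/* Board/CPU side: donate a hypothetical 2KiB on-chip SRAM window. */
static int __init example_sram_register(void)
{
	void __iomem *vaddr = ioremap(0x07fff000, 2048);	/* illustrative address */

	if (!vaddr)
		return -ENOMEM;

	return gen_pool_add(sram_pool, (unsigned long)vaddr, 2048, -1);
}

/* Consumer side: carve out a tiny buffer, return it when done. */
static void example_sram_use(void)
{
	unsigned long buf = gen_pool_alloc(sram_pool, 128);

	if (!buf)
		return;		/* pool empty or too fragmented */

	/* ... use the 128-byte SRAM buffer at 'buf' ... */

	gen_pool_free(sram_pool, buf, 128);
}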
arch/sh/mm/tlb-debugfs.c
@@ -151,15 +151,13 @@ static int __init tlb_debugfs_init(void)
 {
 	struct dentry *itlb, *utlb;
 
-	itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+	itlb = debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_ITLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!itlb))
 		return -ENOMEM;
-	if (IS_ERR(itlb))
-		return PTR_ERR(itlb);
 
-	utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+	utlb = debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
 				   (unsigned int *)TLB_TYPE_UTLB,
 				   &tlb_debugfs_fops);
 	if (unlikely(!utlb)) {
@@ -167,11 +165,6 @@ static int __init tlb_debugfs_init(void)
 		return -ENOMEM;
 	}
 
-	if (IS_ERR(utlb)) {
-		debugfs_remove(itlb);
-		return PTR_ERR(utlb);
-	}
-
 	return 0;
 }
 module_init(tlb_debugfs_init);
arch/sh/mm/tlbflush_32.c
@@ -119,3 +119,19 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 		local_irq_restore(flags);
 	}
 }
+
+void __flush_tlb_global(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/*
+	 * This is the most destructive of the TLB flushing options,
+	 * and will tear down all of the UTLB/ITLB mappings, including
+	 * wired entries.
+	 */
+	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+
+	local_irq_restore(flags);
+}
arch/sh/mm/tlbflush_64.c
@@ -455,6 +455,11 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_tlb_all();
 }
 
+void __flush_tlb_global(void)
+{
+	flush_tlb_all();
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }