Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "The major updates included in this update are:

   - Clang compatible stack pointer accesses by Behan Webster.
   - SA11x0 updates from Dmitry Eremin-Solenikov.
   - kgdb handling of breakpoints with read-only text/modules
   - Support for Privileged-no-execute feature on ARMv7 to prevent
     userspace code execution by the kernel.
   - AMBA primecell bus handling of irq-safe runtime PM
   - Unwinding support for memset/memzero/memmove/memcpy functions
   - VFP fixes for Krait CPUs and improvements in detecting the VFP
     architecture
   - A number of code cleanups (using pr_*, removing or reducing the
     severity of a couple of kernel messages, splitting ftrace asm code
     out to a separate file, etc.)
   - Add machine name to stack dump output"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (62 commits)
  ARM: 8247/2: pcmcia: sa1100: make use of device clock
  ARM: 8246/2: pcmcia: sa1111: provide device clock
  ARM: 8245/1: pcmcia: soc-common: enable/disable socket clocks
  ARM: 8244/1: fbdev: sa1100fb: make use of device clock
  ARM: 8243/1: sa1100: add a clock alias for sa1111 pcmcia device
  ARM: 8242/1: sa1100: add cpu clock
  ARM: 8221/1: PJ4: allow building in Thumb-2 mode
  ARM: 8234/1: sa1100: reorder IRQ handling code
  ARM: 8233/1: sa1100: switch to hwirq usage
  ARM: 8232/1: sa1100: merge GPIO multiplexer IRQ to "normal" irq domain
  ARM: 8231/1: sa1100: introduce irqdomains support
  ARM: 8230/1: sa1100: shift IRQs by one
  ARM: 8229/1: sa1100: replace irq numbers with names in irq driver
  ARM: 8228/1: sa1100: drop entry-macro.S
  ARM: 8227/1: sa1100: switch to MULTI_IRQ_HANDLER
  ARM: 8241/1: Update processor_modes for hyp and monitor mode
  ARM: 8240/1: MCPM: document mcpm_sync_init()
  ARM: 8239/1: Introduce {set,clear}_pte_bit
  ARM: 8238/1: mm: Refine set_memory_* functions
  ARM: 8237/1: fix flush_pfn_alias
  ...
@@ -1009,3 +1009,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 	help
 	  This option specifies the architecture can support big endian
 	  operation.
+
+config ARM_KERNMEM_PERMS
+	bool "Restrict kernel memory permissions"
+	help
+	  If this is set, kernel memory other than kernel text (and rodata)
+	  will be made non-executable. The tradeoff is that each region is
+	  padded to section-size (1MiB) boundaries (because their permissions
+	  are different and splitting the 1M pages into 4K ones causes TLB
+	  performance problems), wasting memory.
+
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	depends on ARM_KERNMEM_PERMS
+	default y
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable. This creates
+	  another section-size padded region, so it can waste more memory
+	  space while gaining the read-only protections.
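For reference, turning the new protections on is just a matter of setting both symbols in the build configuration; a hypothetical .config fragment (symbol names taken from the hunk above) would be:

	CONFIG_ARM_KERNMEM_PERMS=y
	CONFIG_DEBUG_RODATA=y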
@@ -6,7 +6,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o
+				   mmap.o pgd.o mmu.o pageattr.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
@@ -113,7 +113,7 @@ static int safe_usermode(int new_usermode, bool warn)
 		new_usermode |= UM_FIXUP;
 
 		if (warn)
-			printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+			pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
 	}
 
 	return new_usermode;
@@ -523,7 +523,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
 	 * processor for us.
 	 */
 	if (addr != eaddr) {
-		printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
+		pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
 			"addr = %08lx, eaddr = %08lx\n",
 			instruction_pointer(regs), instr, addr, eaddr);
 		show_regs(regs);
@@ -567,7 +567,7 @@ fault:
 	return TYPE_FAULT;
 
 bad:
-	printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
+	pr_err("Alignment trap: not handling ldm with s-bit set\n");
 	return TYPE_ERROR;
 }
@@ -899,13 +899,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	return 0;
 
  swp:
-	printk(KERN_ERR "Alignment trap: not handling swp instruction\n");
+	pr_err("Alignment trap: not handling swp instruction\n");
 
  bad:
 	/*
 	 * Oops, we didn't handle the instruction.
 	 */
-	printk(KERN_ERR "Alignment trap: not handling instruction "
+	pr_err("Alignment trap: not handling instruction "
 		"%0*lx at [<%08lx>]\n",
 		isize << 1,
 		isize == 2 ? tinstr : instr, instrptr);
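These conversions are behavior-preserving: the pr_* helpers from include/linux/printk.h are thin wrappers that bake the loglevel into an ordinary printk(). A minimal sketch of the two used here (pr_fmt() defaults to the bare format string unless a file overrides it):

	#define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_err(fmt, ...)  printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)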
@@ -313,7 +313,7 @@ static void __init disable_l2_prefetch(void)
 	 */
 	u = read_extra_features();
 	if (!(u & 0x01000000)) {
-		printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+		pr_info("Feroceon L2: Disabling L2 prefetch.\n");
 		write_extra_features(u | 0x01000000);
 	}
 }
@@ -326,7 +326,7 @@ static void __init enable_l2(void)
 	if (!(u & 0x00400000)) {
 		int i, d;
 
-		printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+		pr_info("Feroceon L2: Enabling L2\n");
 
 		d = flush_and_disable_dcache();
 		i = invalidate_and_disable_icache();
@@ -353,7 +353,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
 
 	enable_l2();
 
-	printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+	pr_info("Feroceon L2: Cache support initialised%s.\n",
 			 l2_wt_override ? ", in WT override mode" : "");
 }
 #ifdef CONFIG_OF
@@ -185,7 +185,7 @@ static void enable_extra_feature(unsigned int features)
 		u &= ~0x01000000;
 	else
 		u |= 0x01000000;
-	printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
+	pr_info("Tauros2: %s L2 prefetch.\n",
 			(features & CACHE_TAUROS2_PREFETCH_ON)
 			? "Enabling" : "Disabling");
 
@@ -193,7 +193,7 @@ static void enable_extra_feature(unsigned int features)
 		u |= 0x00100000;
 	else
 		u &= ~0x00100000;
-	printk(KERN_INFO "Tauros2: %s line fill burt8.\n",
+	pr_info("Tauros2: %s line fill burt8.\n",
 			(features & CACHE_TAUROS2_LINEFILL_BURST8)
 			? "Enabling" : "Disabling");
 
@@ -216,7 +216,7 @@ static void __init tauros2_internal_init(unsigned int features)
 	 */
 	feat = read_extra_features();
 	if (!(feat & 0x00400000)) {
-		printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+		pr_info("Tauros2: Enabling L2 cache.\n");
 		write_extra_features(feat | 0x00400000);
 	}
 
@@ -253,7 +253,7 @@ static void __init tauros2_internal_init(unsigned int features)
 	 */
 	actlr = read_actlr();
 	if (!(actlr & 0x00000002)) {
-		printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+		pr_info("Tauros2: Enabling L2 cache.\n");
 		write_actlr(actlr | 0x00000002);
 	}
 
@@ -262,11 +262,11 @@ static void __init tauros2_internal_init(unsigned int features)
 #endif
 
 	if (mode == NULL) {
-		printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
+		pr_crit("Tauros2: Unable to detect CPU mode.\n");
 		return;
 	}
 
-	printk(KERN_INFO "Tauros2: L2 cache support initialised "
+	pr_info("Tauros2: L2 cache support initialised "
 			 "in %s mode.\n", mode);
 }
@@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
-	if (asid != 0 && is_reserved_asid(asid)) {
+	if (asid != 0) {
 		/*
-		 * Our current ASID was active during a rollover, we can
-		 * continue to use it and this was just a false alarm.
+		 * If our current ASID was active during a rollover, we
+		 * can continue to use it and this was just a false alarm.
 		 */
-		asid = generation | (asid & ~ASID_MASK);
-	} else {
-		/*
-		 * Allocate a free ASID. If we can't find one, take a
-		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and to
-		 * avoid speculative page table walks from hitting in
-		 * any partial walk caches, which could be populated
-		 * from overlapping level-1 descriptors used to map both
-		 * the module area and the userspace stack.
-		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
-		if (asid == NUM_USER_ASIDS) {
-			generation = atomic64_add_return(ASID_FIRST_VERSION,
-							 &asid_generation);
-			flush_context(cpu);
-			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
-		}
-		__set_bit(asid, asid_map);
-		cur_idx = asid;
-		asid |= generation;
-		cpumask_clear(mm_cpumask(mm));
+		if (is_reserved_asid(asid))
+			return generation | (asid & ~ASID_MASK);
+
+		/*
+		 * We had a valid ASID in a previous life, so try to re-use
+		 * it if possible.,
+		 */
+		asid &= ~ASID_MASK;
+		if (!__test_and_set_bit(asid, asid_map))
+			goto bump_gen;
 	}
 
+	/*
+	 * Allocate a free ASID. If we can't find one, take a note of the
+	 * currently active ASIDs and mark the TLBs as requiring flushes.
+	 * We always count from ASID #1, as we reserve ASID #0 to switch
+	 * via TTBR0 and to avoid speculative page table walks from hitting
+	 * in any partial walk caches, which could be populated from
+	 * overlapping level-1 descriptors used to map both the module
+	 * area and the userspace stack.
+	 */
+	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+	if (asid == NUM_USER_ASIDS) {
+		generation = atomic64_add_return(ASID_FIRST_VERSION,
+						 &asid_generation);
+		flush_context(cpu);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+	}
+
+	__set_bit(asid, asid_map);
+	cur_idx = asid;
+
+bump_gen:
+	asid |= generation;
+	cpumask_clear(mm_cpumask(mm));
 	return asid;
 }
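The early `return generation | (asid & ~ASID_MASK)` and the final `asid |= generation` both rely on the context-ID layout declared at the top of context.c; as a reminder, a sketch of the existing non-LPAE definitions, where hardware ASIDs are 8 bits wide:

	#define ASID_BITS		8
	#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
	#define ASID_MASK		((~0ULL) << ASID_BITS)

	/* mm->context.id packs [generation | hardware ASID]: masking with
	 * ~ASID_MASK keeps the low hardware bits, and OR-ing in the current
	 * generation re-stamps the ASID after the rollover check. */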
@@ -62,7 +62,7 @@ static void discard_old_kernel_data(void *kto)
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
-	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
+	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
 	   : "cc");
 }
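This change, like the flush_pfn_alias() one further down, passes the last byte of the page rather than the start of its last cache line as the range end. The CP15 range operations treat the end address as inclusive and align it internally, so the computation no longer depends on the compile-time L1_CACHE_BYTES matching the CPU's real line size; a sketch of the arithmetic:

	/* old: start of the last cache line -- only covers the whole page
	 * when the compile-time L1_CACHE_BYTES equals the actual line size */
	end = (unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES;

	/* new: last byte of the page, inclusive -- line-size agnostic */
	end = (unsigned long)kto + PAGE_SIZE - 1;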
@@ -235,7 +235,7 @@ void __init check_writebuffer_bugs(void)
 	const char *reason;
 	unsigned long v = 1;
 
-	printk(KERN_INFO "CPU: Testing write buffer coherency: ");
+	pr_info("CPU: Testing write buffer coherency: ");
 
 	page = alloc_page(GFP_KERNEL);
 	if (page) {
@@ -261,9 +261,9 @@ void __init check_writebuffer_bugs(void)
 	}
 
 	if (v) {
-		printk("failed, %s\n", reason);
+		pr_cont("failed, %s\n", reason);
 		shared_pte_mask = L_PTE_MT_UNCACHED;
 	} else {
-		printk("ok\n");
+		pr_cont("ok\n");
 	}
 }
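The continuation messages switch from bare printk() to pr_cont(); in printk.h terms that is simply (a sketch of the real macro):

	#define pr_cont(fmt, ...) printk(KERN_CONT fmt, ##__VA_ARGS__)

KERN_CONT marks the text as a continuation of the previous record, so "failed, ..." or "ok" stays glued to the "Testing write buffer coherency:" line instead of starting a new message at the default loglevel.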
@@ -63,9 +63,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 	if (!mm)
 		mm = &init_mm;
 
-	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+	pr_alert("pgd = %p\n", mm->pgd);
 	pgd = pgd_offset(mm, addr);
-	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+	pr_alert("[%08lx] *pgd=%08llx",
 			addr, (long long)pgd_val(*pgd));
 
 	do {
@@ -77,31 +77,31 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		if (pgd_bad(*pgd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
 		pud = pud_offset(pgd, addr);
 		if (PTRS_PER_PUD != 1)
-			printk(", *pud=%08llx", (long long)pud_val(*pud));
+			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
 
 		if (pud_none(*pud))
 			break;
 
 		if (pud_bad(*pud)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
+			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
 			break;
 
 		if (pmd_bad(*pmd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
@@ -110,15 +110,15 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_map(pmd, addr);
-		printk(", *pte=%08llx", (long long)pte_val(*pte));
+		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
 #ifndef CONFIG_ARM_LPAE
-		printk(", *ppte=%08llx",
+		pr_cont(", *ppte=%08llx",
 		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
 #endif
 		pte_unmap(pte);
 	} while(0);
 
-	printk("\n");
+	pr_cont("\n");
 }
 #else					/* CONFIG_MMU */
 void show_pte(struct mm_struct *mm, unsigned long addr)
@@ -142,10 +142,9 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
 	bust_spinlocks(1);
-	printk(KERN_ALERT
-		"Unable to handle kernel %s at virtual address %08lx\n",
-		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
-		"paging request", addr);
+	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+		 "paging request", addr);
 
 	show_pte(mm, addr);
 	die("Oops", regs, fsr);
@@ -551,7 +550,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
-	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
 
 	info.si_signo = inf->sig;
@@ -583,7 +582,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
-	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
 	info.si_signo = inf->sig;
@@ -33,7 +33,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcr	p15, 0, %2, c7, c10, 4"
 	    :
-	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
+	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
 	    : "cc");
 }
@@ -18,19 +18,20 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	set_pte_ext(fixmap_page_table + idx, pte, 0);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(vaddr);
 }
 
 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-	unsigned long idx = __virt_to_fix(vaddr);
-	return *(fixmap_page_table + idx);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	return *ptep;
 }
 
 void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
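Both helpers now derive the PTE pointer from the virtual address instead of indexing the removed fixmap_page_table. The shared lookup, written out as a sketch (fixmap_ptep() is an illustrative name; pmd_off_k() is the existing arch/arm helper that walks init_mm down to the PMD covering a kernel address):

	static inline pte_t *fixmap_ptep(unsigned long vaddr)
	{
		/* locate the kernel PTE that maps this fixmap slot */
		return pte_offset_kernel(pmd_off_k(vaddr), vaddr);
	}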
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/system_info.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
@@ -67,7 +68,7 @@ early_param("initrd", early_initrd);
 
 static int __init parse_tag_initrd(const struct tag *tag)
 {
-	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+	pr_warn("ATAG_INITRD is deprecated; "
 		"please update your bootloader.\n");
 	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
 	phys_initrd_size = tag->u.initrd.size;
@@ -544,7 +545,7 @@ void __init mem_init(void)
 #define MLM(b, t) b, t, ((t) - (b)) >> 20
 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
-	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+	pr_notice("Virtual kernel memory layout:\n"
 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HAVE_TCM
 			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -570,7 +571,7 @@ void __init mem_init(void)
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLK(FIXADDR_START, FIXADDR_END),
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+	pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+	/* Make pages tables, etc before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#ifdef CONFIG_DEBUG_RODATA
+	/* Make rodata NX (set RO in ro_perms below). */
+	{
+		.start	= (unsigned long)__start_rodata,
+		.end	= (unsigned long)__init_begin,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+	/* Make kernel code and rodata RX (set RO). */
+	{
+		.start	= (unsigned long)_stext,
+		.end	= (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+		.mask	= ~PMD_SECT_RDONLY,
+		.prot	= PMD_SECT_RDONLY,
+#else
+		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.clear	= PMD_SECT_AP_WRITE,
+#endif
+	},
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. Is only
+ * safe to be called with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				       perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+	set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
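The DEBUG_RODATA helpers above are intended to bracket deliberate kernel-text writes (instruction patching and the like) while preemption is disabled; a hypothetical caller (do_patch() and patch_site are illustrative names, not kernel API) would look like:

	static void do_patch(u32 *patch_site, u32 insn)
	{
		set_kernel_text_rw();	/* drop the RO section protection */
		*patch_site = insn;	/* write the new instruction */
		set_kernel_text_ro();	/* restore the protection */
	}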
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -52,6 +53,8 @@ EXPORT_SYMBOL(empty_zero_page);
  */
 pmd_t *top_pmd;
 
+pmdval_t user_pmd_table = _PAGE_USER_TABLE;
+
 #define CPOLICY_UNCACHED	0
 #define CPOLICY_BUFFERED	1
 #define CPOLICY_WRITETHROUGH	2
@@ -192,7 +195,7 @@ early_param("cachepolicy", early_cachepolicy);
 static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
-	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
 }
@@ -201,7 +204,7 @@ early_param("nocache", early_nocache);
 static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
-	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
 }
@@ -354,44 +357,29 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
-#define PTE_SET_FN(_name, pteop) \
-static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
-			void *data) \
-{ \
-	pte_t pte = pteop(*ptep); \
-\
-	set_pte_ext(ptep, pte, 0); \
-	return 0; \
-} \
-
-#define SET_MEMORY_FN(_name, callback) \
-int set_memory_##_name(unsigned long addr, int numpages) \
-{ \
-	unsigned long start = addr; \
-	unsigned long size = PAGE_SIZE*numpages; \
-	unsigned end = start + size; \
-\
-	if (start < MODULES_VADDR || start >= MODULES_END) \
-		return -EINVAL;\
-\
-	if (end < MODULES_VADDR || end >= MODULES_END) \
-		return -EINVAL; \
-\
-	apply_to_page_range(&init_mm, start, size, callback, NULL); \
-	flush_tlb_kernel_range(start, end); \
-	return 0;\
-}
-
-PTE_SET_FN(ro, pte_wrprotect)
-PTE_SET_FN(rw, pte_mkwrite)
-PTE_SET_FN(x, pte_mkexec)
-PTE_SET_FN(nx, pte_mknexec)
-
-SET_MEMORY_FN(ro, pte_set_ro)
-SET_MEMORY_FN(rw, pte_set_rw)
-SET_MEMORY_FN(x, pte_set_x)
-SET_MEMORY_FN(nx, pte_set_nx)
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
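With __set_fixmap() implemented, the generic fixmap wrappers can drive it; assuming the asm-generic/fixmap.h layer (an assumption — the wrapper definitions live outside this diff), mapping and tearing down a slot looks like:

	/* map the page at phys into a fixmap slot with kernel protections;
	 * FIX_EXAMPLE is an illustrative slot name, not a real index */
	set_fixmap(FIX_EXAMPLE, phys);
	/* ... use the mapping at fix_to_virt(FIX_EXAMPLE) ... */
	clear_fixmap(FIX_EXAMPLE);	/* passes __pgprot(0), hitting the
					 * pte_clear() branch above */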
@@ -528,14 +516,23 @@ static void __init build_mem_type_table(void)
 	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * We don't use domains on ARMv6 (since this causes problems with
 	 * v6/v7 kernels), so we must use a separate memory type for user
 	 * r/o, kernel r/w to map the vectors page.
 	 */
-#ifndef CONFIG_ARM_LPAE
 	if (cpu_arch == CPU_ARCH_ARMv6)
 		vecs_pgprot |= L_PTE_MT_VECTORS;
+
+	/*
+	 * Check is it with support for the PXN bit
+	 * in the Short-descriptor translation table format descriptors.
+	 */
+	if (cpu_arch == CPU_ARCH_ARMv7 &&
+		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+		user_pmd_table |= PMD_PXNTABLE;
+	}
 #endif
 
 	/*
@@ -605,6 +602,11 @@ static void __init build_mem_type_table(void)
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
+
+	/*
+	 * Set PXN for user mappings
+	 */
+	user_pgprot |= PTE_EXT_PXN;
 #endif
 
 	for (i = 0; i < 16; i++) {
@@ -786,8 +788,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	length = PAGE_ALIGN(md->length);
 
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
-		printk(KERN_ERR "MM: CPU does not support supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -799,15 +800,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	 * of the actual domain assignments in use.
 	 */
 	if (type->domain) {
-		printk(KERN_ERR "MM: invalid domain in supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
-		       " at 0x%08lx invalid alignment\n",
+		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -850,18 +849,16 @@ static void __init create_mapping(struct map_desc *md)
 	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
-		       " at 0x%08lx in user region\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx out of vmalloc space\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
@@ -881,9 +878,8 @@ static void __init create_mapping(struct map_desc *md)
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
-		       "be mapped using pages, ignoring.\n",
-		       (long long)__pfn_to_phys(md->pfn), addr);
+		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
+			(long long)__pfn_to_phys(md->pfn), addr);
 		return;
 	}
@@ -1053,15 +1049,13 @@ static int __init early_vmalloc(char *arg)
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
-		printk(KERN_WARNING
-			"vmalloc area too small, limiting to %luMB\n",
+		pr_warn("vmalloc area too small, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
 
 	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
 		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
-		printk(KERN_WARNING
-			"vmalloc area is too big, limiting to %luMB\n",
+		pr_warn("vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
 
@@ -1094,7 +1088,7 @@ void __init sanity_check_meminfo(void)
 
 		if (highmem) {
 			pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
-				  &block_start, &block_end);
+				&block_start, &block_end);
 			memblock_remove(reg->base, reg->size);
 			continue;
 		}
@@ -1103,7 +1097,7 @@ void __init sanity_check_meminfo(void)
 			phys_addr_t overlap_size = reg->size - size_limit;
 
 			pr_notice("Truncating RAM at %pa-%pa to -%pa",
-				  &block_start, &block_end, &vmalloc_limit);
+				&block_start, &block_end, &vmalloc_limit);
 			memblock_remove(vmalloc_limit, overlap_size);
 			block_end = vmalloc_limit;
 		}
@@ -1326,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1349,12 +1343,19 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 arch/arm/mm/pageattr.c (new file, 91 lines)
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+	pgprot_t set_mask;
+	pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *data)
+{
+	struct page_change_data *cdata = data;
+	pte_t pte = *ptep;
+
+	pte = clear_pte_bit(pte, cdata->clear_mask);
+	pte = set_pte_bit(pte, cdata->set_mask);
+
+	set_pte_ext(ptep, pte, 0);
+	return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	unsigned long start = addr;
+	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long end = start + size;
+	int ret;
+	struct page_change_data data;
+
+	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+		start &= PAGE_MASK;
+		end = start + size;
+		WARN_ON_ONCE(1);
+	}
+
+	if (!is_module_address(start) || !is_module_address(end - 1))
+		return -EINVAL;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, end);
+	return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+				__pgprot(L_PTE_RDONLY),
+				__pgprot(0));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+				__pgprot(0),
+				__pgprot(L_PTE_RDONLY));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+				__pgprot(L_PTE_XN),
+				__pgprot(0));
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+				__pgprot(0),
+				__pgprot(L_PTE_XN));
+}
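change_memory_common() deliberately rejects anything outside the module area, so the exported set_memory_* calls are only usable on module text/data; a hypothetical module-side caller (protect_ops() and ops are illustrative names, not kernel API):

	static u8 ops[PAGE_SIZE] __aligned(PAGE_SIZE);	/* lives in module data */

	static int protect_ops(void)
	{
		/* write-protect one page; returns -EINVAL if the address is
		 * not within MODULES_VADDR..MODULES_END */
		return set_memory_ro((unsigned long)ops, 1);
	}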
@@ -591,9 +591,10 @@ __krait_proc_info:
 	/*
 	 * Some Krait processors don't indicate support for SDIV and UDIV
	 * instructions in the ARM instruction set, even though they actually
-	 * do support them.
+	 * do support them. They also don't indicate support for fused multiply
+	 * instructions even though they actually do support them.
 	 */
-	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
 	.size	__krait_proc_info, . - __krait_proc_info
 
 	/*