tile: Use the more common pr_warn instead of pr_warning
And other message logging neatening. Other miscellanea:
 o coalesce formats
 o realign arguments
 o standardize a couple of macros
 o use __func__ instead of embedding the function name

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
This commit is contained in:

committed by
Chris Metcalf

parent
ebd25caf7d
commit
f47436734d
@@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte)
|
||||
while (pte_migrating(*pte)) {
|
||||
barrier();
|
||||
if (++retries > bound)
|
||||
panic("Hit migrating PTE (%#llx) and"
|
||||
" page PFN %#lx still migrating",
|
||||
panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
|
||||
pte->val, pte_pfn(*pte));
|
||||
}
|
||||
}
|
||||
@@ -292,11 +291,10 @@ static int handle_page_fault(struct pt_regs *regs,
|
||||
*/
|
||||
stack_offset = stack_pointer & (THREAD_SIZE-1);
|
||||
if (stack_offset < THREAD_SIZE / 8) {
|
||||
pr_alert("Potential stack overrun: sp %#lx\n",
|
||||
stack_pointer);
|
||||
pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
|
||||
show_regs(regs);
|
||||
pr_alert("Killing current process %d/%s\n",
|
||||
tsk->pid, tsk->comm);
|
||||
tsk->pid, tsk->comm);
|
||||
do_group_exit(SIGKILL);
|
||||
}
|
||||
|
||||
@@ -421,7 +419,7 @@ good_area:
|
||||
} else if (write) {
|
||||
#ifdef TEST_VERIFY_AREA
|
||||
if (!is_page_fault && regs->cs == KERNEL_CS)
|
||||
pr_err("WP fault at "REGFMT"\n", regs->eip);
|
||||
pr_err("WP fault at " REGFMT "\n", regs->eip);
|
||||
#endif
|
||||
if (!(vma->vm_flags & VM_WRITE))
|
||||
goto bad_area;
|
||||
@@ -519,16 +517,15 @@ no_context:
|
||||
pte_t *pte = lookup_address(address);
|
||||
|
||||
if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
|
||||
pr_crit("kernel tried to execute"
|
||||
" non-executable page - exploit attempt?"
|
||||
" (uid: %d)\n", current->uid);
|
||||
pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
|
||||
current->uid);
|
||||
}
|
||||
#endif
|
||||
if (address < PAGE_SIZE)
|
||||
pr_alert("Unable to handle kernel NULL pointer dereference\n");
|
||||
else
|
||||
pr_alert("Unable to handle kernel paging request\n");
|
||||
pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
|
||||
pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
|
||||
address, regs->pc);
|
||||
|
||||
show_regs(regs);
|
||||
@@ -575,9 +572,10 @@ do_sigbus:
|
||||
#ifndef __tilegx__
|
||||
|
||||
/* We must release ICS before panicking or we won't get anywhere. */
|
||||
#define ics_panic(fmt, ...) do { \
|
||||
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
|
||||
panic(fmt, __VA_ARGS__); \
|
||||
#define ics_panic(fmt, ...) \
|
||||
do { \
|
||||
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
|
||||
panic(fmt, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
@@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
|
||||
fault_num != INT_DTLB_ACCESS)) {
|
||||
unsigned long old_pc = regs->pc;
|
||||
regs->pc = pc;
|
||||
ics_panic("Bad ICS page fault args:"
|
||||
" old PC %#lx, fault %d/%d at %#lx\n",
|
||||
ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
|
||||
old_pc, fault_num, write, address);
|
||||
}
|
||||
|
||||
@@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
|
||||
#endif
|
||||
fixup = search_exception_tables(pc);
|
||||
if (!fixup)
|
||||
ics_panic("ICS atomic fault not in table:"
|
||||
" PC %#lx, fault %d", pc, fault_num);
|
||||
ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
|
||||
pc, fault_num);
|
||||
regs->pc = fixup->fixup;
|
||||
regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
|
||||
}
|
||||
@@ -826,8 +823,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
|
||||
|
||||
set_thread_flag(TIF_ASYNC_TLB);
|
||||
if (async->fault_num != 0) {
|
||||
panic("Second async fault %d;"
|
||||
" old fault was %d (%#lx/%ld)",
|
||||
panic("Second async fault %d; old fault was %d (%#lx/%ld)",
|
||||
fault_num, async->fault_num,
|
||||
address, write);
|
||||
}
|
||||
|
@@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
|
||||
cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
|
||||
cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
|
||||
|
||||
pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
|
||||
" %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
|
||||
pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
|
||||
cache_pa, cache_control, cache_cpumask, cache_buf,
|
||||
(unsigned long)tlb_va, tlb_length, tlb_pgsize,
|
||||
tlb_cpumask, tlb_buf,
|
||||
asids, asidcount, rc);
|
||||
tlb_cpumask, tlb_buf, asids, asidcount, rc);
|
||||
panic("Unsafe to continue.");
|
||||
}
|
||||
|
||||
|
@@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps)
|
||||
int level, base_shift;
|
||||
|
||||
if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
|
||||
pr_warn("Not enabling %ld byte huge pages;"
|
||||
" must be a power of four.\n", ps);
|
||||
pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
|
||||
ps);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ps > 64*1024*1024*1024UL) {
|
||||
pr_warn("Not enabling %ld MB huge pages;"
|
||||
" largest legal value is 64 GB .\n", ps >> 20);
|
||||
pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
|
||||
ps >> 20);
|
||||
return -EINVAL;
|
||||
} else if (ps >= PUD_SIZE) {
|
||||
static long hv_jpage_size;
|
||||
if (hv_jpage_size == 0)
|
||||
hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
|
||||
if (hv_jpage_size != PUD_SIZE) {
|
||||
pr_warn("Not enabling >= %ld MB huge pages:"
|
||||
" hypervisor reports size %ld\n",
|
||||
pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
|
||||
PUD_SIZE >> 20, hv_jpage_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps)
|
||||
int shift_val = log_ps - base_shift;
|
||||
if (huge_shift[level] != 0) {
|
||||
int old_shift = base_shift + huge_shift[level];
|
||||
pr_warn("Not enabling %ld MB huge pages;"
|
||||
" already have size %ld MB.\n",
|
||||
pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
|
||||
ps >> 20, (1UL << old_shift) >> 20);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (hv_set_pte_super_shift(level, shift_val) != 0) {
|
||||
pr_warn("Not enabling %ld MB huge pages;"
|
||||
" no hypervisor support.\n", ps >> 20);
|
||||
pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
|
||||
ps >> 20);
|
||||
return -EINVAL;
|
||||
}
|
||||
printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
|
||||
|
@@ -357,11 +357,11 @@ static int __init setup_ktext(char *str)
|
||||
cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
|
||||
if (cpumask_weight(&ktext_mask) > 1) {
|
||||
ktext_small = 1;
|
||||
pr_info("ktext: using caching neighborhood %s "
|
||||
"with small pages\n", buf);
|
||||
pr_info("ktext: using caching neighborhood %s with small pages\n",
|
||||
buf);
|
||||
} else {
|
||||
pr_info("ktext: caching on cpu %s with one huge page\n",
|
||||
buf);
|
||||
buf);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
|
||||
int rc, i;
|
||||
|
||||
if (ktext_arg_seen && ktext_hash) {
|
||||
pr_warning("warning: \"ktext\" boot argument ignored"
|
||||
" if \"kcache_hash\" sets up text hash-for-home\n");
|
||||
pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
|
||||
ktext_small = 0;
|
||||
}
|
||||
|
||||
if (kdata_arg_seen && kdata_hash) {
|
||||
pr_warning("warning: \"kdata\" boot argument ignored"
|
||||
" if \"kcache_hash\" sets up data hash-for-home\n");
|
||||
pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
|
||||
}
|
||||
|
||||
if (kdata_huge && !hash_default) {
|
||||
pr_warning("warning: disabling \"kdata=huge\"; requires"
|
||||
" kcache_hash=all or =allbutstack\n");
|
||||
pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
|
||||
kdata_huge = 0;
|
||||
}
|
||||
|
||||
@@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
|
||||
pte[pte_ofs] = pfn_pte(pfn, prot);
|
||||
} else {
|
||||
if (kdata_huge)
|
||||
printk(KERN_DEBUG "pre-shattered huge"
|
||||
" page at %#lx\n", address);
|
||||
printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
|
||||
address);
|
||||
for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
|
||||
pfn++, pte_ofs++, address += PAGE_SIZE) {
|
||||
pgprot_t prot = init_pgprot(address);
|
||||
@@ -501,8 +498,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
|
||||
pr_info("ktext: not using unavailable cpus %s\n", buf);
|
||||
}
|
||||
if (cpumask_empty(&ktext_mask)) {
|
||||
pr_warning("ktext: no valid cpus; caching on %d.\n",
|
||||
smp_processor_id());
|
||||
pr_warn("ktext: no valid cpus; caching on %d\n",
|
||||
smp_processor_id());
|
||||
cpumask_copy(&ktext_mask,
|
||||
cpumask_of(smp_processor_id()));
|
||||
}
|
||||
@@ -798,11 +795,9 @@ void __init mem_init(void)
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
/* check that fixmap and pkmap do not overlap */
|
||||
if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
|
||||
pr_err("fixmap and kmap areas overlap"
|
||||
" - this will crash\n");
|
||||
pr_err("fixmap and kmap areas overlap - this will crash\n");
|
||||
pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
|
||||
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
|
||||
FIXADDR_START);
|
||||
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
@@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
|
||||
unsigned long addr = (unsigned long) begin;
|
||||
|
||||
if (kdata_huge && !initfree) {
|
||||
pr_warning("Warning: ignoring initfree=0:"
|
||||
" incompatible with kdata=huge\n");
|
||||
pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
|
||||
initfree = 1;
|
||||
}
|
||||
end = (end + PAGE_SIZE - 1) & PAGE_MASK;
|
||||
|
@@ -44,9 +44,7 @@ void show_mem(unsigned int filter)
|
||||
{
|
||||
struct zone *zone;
|
||||
|
||||
pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
|
||||
" free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
|
||||
" pagecache:%lu swap:%lu\n",
|
||||
pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
|
||||
(global_page_state(NR_ACTIVE_ANON) +
|
||||
global_page_state(NR_ACTIVE_FILE)),
|
||||
(global_page_state(NR_INACTIVE_ANON) +
|
||||
|
Reference in New Issue
Block a user