Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Merge sparc bug fixes that didn't make it into v3.9 into sparc-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -62,7 +62,6 @@ config SPARC64
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_SYSCALL_WRAPPERS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
@@ -257,29 +256,6 @@ config HOTPLUG_CPU

if SPARC64
source "drivers/cpufreq/Kconfig"

config US3_FREQ
tristate "UltraSPARC-III CPU Frequency driver"
depends on CPU_FREQ
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-III processors.

For details, take a look at <file:Documentation/cpu-freq>.

If in doubt, say N.

config US2E_FREQ
tristate "UltraSPARC-IIe CPU Frequency driver"
depends on CPU_FREQ
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.

For details, take a look at <file:Documentation/cpu-freq>.

If in doubt, say N.

endif

config US3_MC
@@ -414,6 +390,8 @@ config SERIAL_CONSOLE
config SPARC_LEON
bool "Sparc Leon processor family"
depends on SPARC32
select USB_EHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_DESC
---help---
If you say Y here if you are running on a SPARC-LEON processor.
The LEON processor is a synthesizable VHDL model of the
@@ -2,11 +2,16 @@

generic-y += clkdev.h
generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += local64.h
generic-y += mutex.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += module.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
@@ -1,6 +0,0 @@
#ifndef __SPARC_CPUTIME_H
#define __SPARC_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* __SPARC_CPUTIME_H */
@@ -1,6 +0,0 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H

#include <asm-generic/emergency-restart.h>

#endif /* _ASM_EMERGENCY_RESTART_H */
@@ -2,6 +2,7 @@
#define _ASM_SPARC64_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>


void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -1,9 +0,0 @@
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/

#include <asm-generic/mutex-dec.h>
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}

#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
@@ -1,6 +0,0 @@
#ifndef __SPARC_SERIAL_H
#define __SPARC_SERIAL_H

#define BASE_BAUD ( 1843200 / 16 )

#endif /* __SPARC_SERIAL_H */
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);

void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);

/*
* General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);

#define raw_smp_processor_id() (current_thread_info()->cpu)

#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */
@@ -18,8 +18,7 @@ do { \
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
do { flush_tlb_pending(); \
save_and_clear_fpu(); \
do { save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
@@ -132,8 +132,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
_TIF_SIGPENDING)

#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)

#endif /* __KERNEL__ */

#endif /* _ASM_THREAD_INFO_H */
@@ -256,8 +256,6 @@ static inline bool test_and_clear_restore_sigmask(void)
return true;
}

#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)

#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
#define test_thread_64bit_stack(__SP) \
((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
@@ -11,24 +11,40 @@
struct tlb_batch {
struct mm_struct *mm;
unsigned long tlb_nr;
unsigned long active;
unsigned long vaddrs[TLB_BATCH_NR];
};

extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/* TLB flush operations. */

extern void flush_tlb_pending(void);
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

#define flush_tlb_range(vma,start,end) \
do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_page(vma,addr) flush_tlb_pending()
#define flush_tlb_mm(mm) flush_tlb_pending()
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

extern void flush_tlb_pending(void);
extern void arch_enter_lazy_mmu_mode(void);
extern void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode() do {} while (0)

/* Local cpu only. */
extern void __flush_tlb_all(void);

extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
__flush_tlb_kernel_range(start,end); \
} while (0)

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define flush_tlb_kernel_range(start, end) \
do { flush_tsb_kernel_range(start,end); \
smp_flush_tlb_kernel_range(start, end); \
} while (0)

#define global_flush_tlb_page(mm, vaddr) \
smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */
@@ -45,12 +45,4 @@
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif

/*
* "Conditional" syscalls
*
* What we want is __attribute__((weak,alias("sys_ni_syscall"))),
* but it doesn't work on all toolchains, so we just do it by hand
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")

#endif /* _SPARC_UNISTD_H */
@@ -44,7 +44,6 @@ header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += traps.h
header-y += types.h
header-y += uctx.h
header-y += unistd.h
header-y += utrap.h
@@ -68,6 +68,8 @@

#define SO_LOCK_FILTER 0x0028

#define SO_SELECT_ERR_QUEUE 0x0029

/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
@@ -1,17 +0,0 @@
#ifndef _SPARC_TYPES_H
#define _SPARC_TYPES_H
/*
* This file is never included by application software unless
* explicitly requested (e.g., via linux/types.h) in which case the
* application is Linux specific so (user-) name space pollution is
* not a major issue. However, for interoperability, libraries still
* need to be careful to avoid a name clashes.
*/

#if defined(__sparc__)

#include <asm-generic/int-ll64.h>

#endif /* defined(__sparc__) */

#endif /* defined(_SPARC_TYPES_H) */
@@ -103,9 +103,6 @@ obj-$(CONFIG_PCI_MSI) += pci_msi.o

obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o

# sparc64 cpufreq
obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
obj-$(CONFIG_US3_MC) += chmc.o

obj-$(CONFIG_KPROBES) += kprobes.o
@@ -128,8 +128,7 @@ hv_cpu_startup:

call smp_callin
nop
call cpu_idle
mov 0, %o0

call cpu_panic
nop
@@ -693,7 +693,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, sparc_io_proc_show, PDE(inode)->data);
return single_open(file, sparc_io_proc_show, PDE_DATA(inode));
}

static const struct file_operations sparc_io_proc_fops = {
@@ -213,6 +213,7 @@ unsigned int leon_build_device_irq(unsigned int real_irq,
{
unsigned int irq;
unsigned long mask;
struct irq_desc *desc;

irq = 0;
mask = leon_get_irqmask(real_irq);
@@ -226,9 +227,12 @@ unsigned int leon_build_device_irq(unsigned int real_irq,
if (do_ack)
mask |= LEON_DO_ACK_HW;

irq_set_chip_and_handler_name(irq, &leon_irq,
flow_handler, name);
irq_set_chip_data(irq, (void *)mask);
desc = irq_to_desc(irq);
if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
irq_set_chip_and_handler_name(irq, &leon_irq,
flow_handler, name);
irq_set_chip_data(irq, (void *)mask);
}

out:
return irq;
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];

/*
* the idle loop on a Sparc... ;)
*/
void cpu_idle(void)
/* Idle loop support. */
void arch_cpu_idle(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);

/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
if (sparc_idle)
(*sparc_idle)();
else
cpu_relax();
}
schedule_preempt_disabled();
}
if (sparc_idle)
(*sparc_idle)();
local_irq_enable();
}

/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
@@ -123,6 +112,8 @@ void show_regs(struct pt_regs *r)
{
struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];

show_regs_print_info(KERN_DEFAULT);

printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
r->psr, r->pc, r->npc, r->y, print_tainted());
printk("PC: <%pS>\n", (void *) r->pc);
@@ -153,11 +144,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
struct reg_window32 *rw;
int count = 0;

if (tsk != NULL)
task_base = (unsigned long) task_stack_page(tsk);
else
task_base = (unsigned long) current_thread_info();
if (!tsk)
tsk = current;

if (tsk == current && !_ksp)
__asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp));

task_base = (unsigned long) task_stack_page(tsk);
fp = (unsigned long) _ksp;
do {
/* Bogus frame pointer? */
@@ -173,17 +166,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
printk("\n");
}

void dump_stack(void)
{
unsigned long *ksp;

__asm__ __volatile__("mov %%fp, %0"
: "=r" (ksp));
show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);

/*
* Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
*/
@@ -52,20 +52,17 @@
|
||||
|
||||
#include "kstack.h"
|
||||
|
||||
static void sparc64_yield(int cpu)
|
||||
/* Idle loop support on sparc64. */
|
||||
void arch_cpu_idle(void)
|
||||
{
|
||||
if (tlb_type != hypervisor) {
|
||||
touch_nmi_watchdog();
|
||||
return;
|
||||
}
|
||||
|
||||
clear_thread_flag(TIF_POLLING_NRFLAG);
|
||||
smp_mb__after_clear_bit();
|
||||
|
||||
while (!need_resched() && !cpu_is_offline(cpu)) {
|
||||
} else {
|
||||
unsigned long pstate;
|
||||
|
||||
/* Disable interrupts. */
|
||||
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
|
||||
* the cpu sleep hypervisor call.
|
||||
*/
|
||||
__asm__ __volatile__(
|
||||
"rdpr %%pstate, %0\n\t"
|
||||
"andn %0, %1, %0\n\t"
|
||||
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
|
||||
: "=&r" (pstate)
|
||||
: "i" (PSTATE_IE));
|
||||
|
||||
if (!need_resched() && !cpu_is_offline(cpu))
|
||||
if (!need_resched() && !cpu_is_offline(smp_processor_id()))
|
||||
sun4v_cpu_yield();
|
||||
|
||||
/* Re-enable interrupts. */
|
||||
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
|
||||
: "=&r" (pstate)
|
||||
: "i" (PSTATE_IE));
|
||||
}
|
||||
|
||||
set_thread_flag(TIF_POLLING_NRFLAG);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
/* The idle loop on sparc64. */
|
||||
void cpu_idle(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
set_thread_flag(TIF_POLLING_NRFLAG);
|
||||
|
||||
while(1) {
|
||||
tick_nohz_idle_enter();
|
||||
rcu_idle_enter();
|
||||
|
||||
while (!need_resched() && !cpu_is_offline(cpu))
|
||||
sparc64_yield(cpu);
|
||||
|
||||
rcu_idle_exit();
|
||||
tick_nohz_idle_exit();
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
if (cpu_is_offline(cpu)) {
|
||||
sched_preempt_enable_no_resched();
|
||||
cpu_play_dead();
|
||||
}
|
||||
#endif
|
||||
schedule_preempt_disabled();
|
||||
}
|
||||
void arch_cpu_idle_dead()
|
||||
{
|
||||
sched_preempt_enable_no_resched();
|
||||
cpu_play_dead();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static void show_regwindow32(struct pt_regs *regs)
|
||||
@@ -186,6 +163,8 @@ static void show_regwindow(struct pt_regs *regs)
|
||||
|
||||
void show_regs(struct pt_regs *regs)
|
||||
{
|
||||
show_regs_print_info(KERN_DEFAULT);
|
||||
|
||||
printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
|
||||
regs->tpc, regs->tnpc, regs->y, print_tainted());
|
||||
printk("TPC: <%pS>\n", (void *) regs->tpc);
|
||||
@@ -315,7 +294,7 @@ static void sysrq_handle_globreg(int key)
|
||||
|
||||
static struct sysrq_key_op sparc_globalreg_op = {
|
||||
.handler = sysrq_handle_globreg,
|
||||
.help_msg = "global-regs(Y)",
|
||||
.help_msg = "global-regs(y)",
|
||||
.action_msg = "Show Global CPU Regs",
|
||||
};
|
||||
|
||||
@@ -385,7 +364,7 @@ static void sysrq_handle_globpmu(int key)
|
||||
|
||||
static struct sysrq_key_op sparc_globalpmu_op = {
|
||||
.handler = sysrq_handle_globpmu,
|
||||
.help_msg = "global-pmu(X)",
|
||||
.help_msg = "global-pmu(x)",
|
||||
.action_msg = "Show Global PMU Regs",
|
||||
};
|
||||
|
||||
|
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
|
||||
local_irq_enable();
|
||||
|
||||
wmb();
|
||||
cpu_idle();
|
||||
cpu_startup_entry(CPUHP_ONLINE);
|
||||
|
||||
/* We should never reach here! */
|
||||
BUG();
|
||||
|
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void)
|
||||
|
||||
/* idle thread is expected to have preempt disabled */
|
||||
preempt_disable();
|
||||
|
||||
cpu_startup_entry(CPUHP_ONLINE);
|
||||
}
|
||||
|
||||
void cpu_panic(void)
|
||||
@@ -849,7 +851,7 @@ void smp_tsb_sync(struct mm_struct *mm)
|
||||
}
|
||||
|
||||
extern unsigned long xcall_flush_tlb_mm;
|
||||
extern unsigned long xcall_flush_tlb_pending;
|
||||
extern unsigned long xcall_flush_tlb_page;
|
||||
extern unsigned long xcall_flush_tlb_kernel_range;
|
||||
extern unsigned long xcall_fetch_glob_regs;
|
||||
extern unsigned long xcall_fetch_glob_pmu;
|
||||
@@ -1074,19 +1076,52 @@ local_flush_and_out:
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
struct tlb_pending_info {
|
||||
unsigned long ctx;
|
||||
unsigned long nr;
|
||||
unsigned long *vaddrs;
|
||||
};
|
||||
|
||||
static void tlb_pending_func(void *info)
|
||||
{
|
||||
struct tlb_pending_info *t = info;
|
||||
|
||||
__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
|
||||
}
|
||||
|
||||
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
|
||||
{
|
||||
u32 ctx = CTX_HWBITS(mm->context);
|
||||
struct tlb_pending_info info;
|
||||
int cpu = get_cpu();
|
||||
|
||||
info.ctx = ctx;
|
||||
info.nr = nr;
|
||||
info.vaddrs = vaddrs;
|
||||
|
||||
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
|
||||
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
|
||||
else
|
||||
smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
|
||||
&info, 1);
|
||||
|
||||
__flush_tlb_pending(ctx, nr, vaddrs);
|
||||
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
|
||||
{
|
||||
unsigned long context = CTX_HWBITS(mm->context);
|
||||
int cpu = get_cpu();
|
||||
|
||||
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
|
||||
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
|
||||
else
|
||||
smp_cross_call_masked(&xcall_flush_tlb_pending,
|
||||
ctx, nr, (unsigned long) vaddrs,
|
||||
smp_cross_call_masked(&xcall_flush_tlb_page,
|
||||
context, vaddr, 0,
|
||||
mm_cpumask(mm));
|
||||
|
||||
__flush_tlb_pending(ctx, nr, vaddrs);
|
||||
__flush_tlb_page(context, vaddr);
|
||||
|
||||
put_cpu();
|
||||
}
|
||||
|
@@ -6,6 +6,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/timer.h>
|
||||
|
@@ -36,7 +36,6 @@ STUB: sra REG1, 0, REG1; \
|
||||
jmpl %g1 + %lo(SYSCALL), %g0; \
|
||||
sra REG3, 0, REG3
|
||||
|
||||
SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
|
||||
SIGN1(sys32_readahead, compat_sys_readahead, %o0)
|
||||
SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
|
||||
SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
|
||||
@@ -46,12 +45,9 @@ SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
|
||||
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
|
||||
SIGN1(sys32_select, compat_sys_select, %o0)
|
||||
SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
|
||||
SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
|
||||
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
|
||||
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
|
||||
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
|
||||
SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
|
||||
SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
|
||||
|
||||
.globl sys32_mmap2
|
||||
sys32_mmap2:
|
||||
|
@@ -49,71 +49,6 @@
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/compat_signal.h>
|
||||
|
||||
#ifdef CONFIG_SYSVIPC
|
||||
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
|
||||
{
|
||||
int version;
|
||||
|
||||
version = call >> 16; /* hack for backward compatibility */
|
||||
call &= 0xffff;
|
||||
|
||||
switch (call) {
|
||||
case SEMTIMEDOP:
|
||||
if (fifth)
|
||||
/* sign extend semid */
|
||||
return compat_sys_semtimedop((int)first,
|
||||
compat_ptr(ptr), second,
|
||||
compat_ptr(fifth));
|
||||
/* else fall through for normal semop() */
|
||||
case SEMOP:
|
||||
/* struct sembuf is the same on 32 and 64bit :)) */
|
||||
/* sign extend semid */
|
||||
return sys_semtimedop((int)first, compat_ptr(ptr), second,
|
||||
NULL);
|
||||
case SEMGET:
|
||||
/* sign extend key, nsems */
|
||||
return sys_semget((int)first, (int)second, third);
|
||||
case SEMCTL:
|
||||
/* sign extend semid, semnum */
|
||||
return compat_sys_semctl((int)first, (int)second, third,
|
||||
compat_ptr(ptr));
|
||||
|
||||
case MSGSND:
|
||||
/* sign extend msqid */
|
||||
return compat_sys_msgsnd((int)first, (int)second, third,
|
||||
compat_ptr(ptr));
|
||||
case MSGRCV:
|
||||
/* sign extend msqid, msgtyp */
|
||||
return compat_sys_msgrcv((int)first, second, (int)fifth,
|
||||
third, version, compat_ptr(ptr));
|
||||
case MSGGET:
|
||||
/* sign extend key */
|
||||
return sys_msgget((int)first, second);
|
||||
case MSGCTL:
|
||||
/* sign extend msqid */
|
||||
return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
|
||||
|
||||
case SHMAT:
|
||||
/* sign extend shmid */
|
||||
return compat_sys_shmat((int)first, second, third, version,
|
||||
compat_ptr(ptr));
|
||||
case SHMDT:
|
||||
return sys_shmdt(compat_ptr(ptr));
|
||||
case SHMGET:
|
||||
/* sign extend key_t */
|
||||
return sys_shmget((int)first, second, third);
|
||||
case SHMCTL:
|
||||
/* sign extend shmid */
|
||||
return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
|
||||
|
||||
default:
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif
|
||||
|
||||
asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
|
||||
{
|
||||
if ((int)high < 0)
|
||||
@@ -303,15 +238,7 @@ long compat_sys_fadvise64_64(int fd,
|
||||
advice);
|
||||
}
|
||||
|
||||
long sys32_lookup_dcookie(unsigned long cookie_high,
|
||||
unsigned long cookie_low,
|
||||
char __user *buf, size_t len)
|
||||
{
|
||||
return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
|
||||
buf, len);
|
||||
}
|
||||
|
||||
long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags)
|
||||
long sys32_sync_file_range(unsigned int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, unsigned int flags)
|
||||
{
|
||||
return sys_sync_file_range(fd,
|
||||
(off_high << 32) | off_low,
|
||||
|
@@ -353,7 +353,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
|
||||
case SEMCTL: {
|
||||
err = sys_semctl(first, second,
|
||||
(int)third | IPC_64,
|
||||
(union semun) ptr);
|
||||
(unsigned long) ptr);
|
||||
goto out;
|
||||
}
|
||||
default:
|
||||
@@ -470,10 +470,6 @@ SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
|
||||
|
||||
return vm_munmap(addr, len);
|
||||
}
|
||||
|
||||
extern unsigned long do_mremap(unsigned long addr,
|
||||
unsigned long old_len, unsigned long new_len,
|
||||
unsigned long flags, unsigned long new_addr);
|
||||
|
||||
SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
|
||||
unsigned long, new_len, unsigned long, flags,
|
||||
|
@@ -23,9 +23,9 @@ sys_call_table32:
|
||||
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
|
||||
/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
|
||||
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
|
||||
/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
|
||||
/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
|
||||
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
|
||||
.word sys_chown, sys_sync, sys_kill, compat_sys_newstat, sys32_sendfile
|
||||
.word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile
|
||||
/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
|
||||
.word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
|
||||
/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
|
||||
@@ -41,7 +41,7 @@ sys_call_table32:
|
||||
/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
|
||||
.word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
|
||||
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
|
||||
.word sys_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
|
||||
.word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd
|
||||
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
|
||||
.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
|
||||
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
|
||||
@@ -59,7 +59,7 @@ sys_call_table32:
|
||||
/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
|
||||
.word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask
|
||||
/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
|
||||
.word sys32_readahead, sys32_socketcall, sys_syslog, sys32_lookup_dcookie, sys32_fadvise64
|
||||
.word sys32_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, sys32_fadvise64
|
||||
/*210*/ .word sys32_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo
|
||||
.word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex
|
||||
/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
|
||||
|
@@ -407,8 +407,7 @@ after_lock_tlb:
|
||||
|
||||
call smp_callin
|
||||
nop
|
||||
call cpu_idle
|
||||
mov 0, %o0
|
||||
|
||||
call cpu_panic
|
||||
nop
|
||||
1: b,a,pt %xcc, 1b
|
||||
|
@@ -2350,13 +2350,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
|
||||
} while (++count < 16);
|
||||
}
|
||||
|
||||
void dump_stack(void)
|
||||
{
|
||||
show_stack(current, NULL);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(dump_stack);
|
||||
|
||||
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
|
||||
{
|
||||
unsigned long fp = rw->ins[6];
|
||||
|
@@ -1,413 +0,0 @@
|
||||
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
|
||||
*
|
||||
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
|
||||
*
|
||||
* Many thanks to Dominik Brodowski for fixing up the cpufreq
|
||||
* infrastructure in order to make this driver easier to implement.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/asi.h>
|
||||
#include <asm/timer.h>
|
||||
|
||||
static struct cpufreq_driver *cpufreq_us2e_driver;
|
||||
|
||||
struct us2e_freq_percpu_info {
|
||||
struct cpufreq_frequency_table table[6];
|
||||
};
|
||||
|
||||
/* Indexed by cpu number. */
|
||||
static struct us2e_freq_percpu_info *us2e_freq_table;
|
||||
|
||||
#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
|
||||
#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
|
||||
|
||||
/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
|
||||
* in the ESTAR mode control register.
|
||||
*/
|
||||
#define ESTAR_MODE_DIV_1 0x0000000000000000UL
|
||||
#define ESTAR_MODE_DIV_2 0x0000000000000001UL
|
||||
#define ESTAR_MODE_DIV_4 0x0000000000000003UL
|
||||
#define ESTAR_MODE_DIV_6 0x0000000000000002UL
|
||||
#define ESTAR_MODE_DIV_8 0x0000000000000004UL
|
||||
#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
|
||||
|
||||
#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
|
||||
#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
|
||||
#define MCTRL0_REFR_COUNT_SHIFT 8
|
||||
#define MCTRL0_REFR_INTERVAL 7800
|
||||
#define MCTRL0_REFR_CLKS_P_CNT 64
|
||||
|
||||
static unsigned long read_hbreg(unsigned long addr)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
__asm__ __volatile__("ldxa [%1] %2, %0"
|
||||
: "=&r" (ret)
|
||||
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void write_hbreg(unsigned long addr, unsigned long val)
|
||||
{
|
||||
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
|
||||
"membar #Sync"
|
||||
: /* no outputs */
|
||||
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
|
||||
: "memory");
|
||||
if (addr == HBIRD_ESTAR_MODE_ADDR) {
|
||||
/* Need to wait 16 clock cycles for the PLL to lock. */
|
||||
udelay(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void self_refresh_ctl(int enable)
|
||||
{
|
||||
unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
|
||||
|
||||
if (enable)
|
||||
mctrl |= MCTRL0_SREFRESH_ENAB;
|
||||
else
|
||||
mctrl &= ~MCTRL0_SREFRESH_ENAB;
|
||||
write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
|
||||
(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
|
||||
}
|
||||
|
||||
static void frob_mem_refresh(int cpu_slowing_down,
|
||||
unsigned long clock_tick,
|
||||
unsigned long old_divisor, unsigned long divisor)
|
||||
{
|
||||
unsigned long old_refr_count, refr_count, mctrl;
|
||||
|
||||
refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
|
||||
refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
|
||||
|
||||
mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
|
||||
old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
|
||||
>> MCTRL0_REFR_COUNT_SHIFT;
|
||||
|
||||
mctrl &= ~MCTRL0_REFR_COUNT_MASK;
|
||||
mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
|
||||
write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
|
||||
mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
|
||||
|
||||
if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
|
||||
unsigned long usecs;
|
||||
|
||||
/* We have to wait for both refresh counts (old
|
||||
* and new) to go to zero.
|
||||
*/
|
||||
usecs = (MCTRL0_REFR_CLKS_P_CNT *
|
||||
(refr_count + old_refr_count) *
|
||||
1000000UL *
|
||||
old_divisor) / clock_tick;
|
||||
udelay(usecs + 1UL);
|
||||
}
|
||||
}
|
||||
|
||||
static void us2e_transition(unsigned long estar, unsigned long new_bits,
|
||||
unsigned long clock_tick,
|
||||
unsigned long old_divisor, unsigned long divisor)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
estar &= ~ESTAR_MODE_DIV_MASK;
|
||||
|
||||
/* This is based upon the state transition diagram in the IIe manual. */
|
||||
if (old_divisor == 2 && divisor == 1) {
|
||||
self_refresh_ctl(0);
|
||||
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
|
||||
frob_mem_refresh(0, clock_tick, old_divisor, divisor);
|
||||
} else if (old_divisor == 1 && divisor == 2) {
|
||||
frob_mem_refresh(1, clock_tick, old_divisor, divisor);
|
||||
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
|
||||
self_refresh_ctl(1);
|
||||
} else if (old_divisor == 1 && divisor > 2) {
|
||||
us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
|
||||
1, 2);
|
||||
us2e_transition(estar, new_bits, clock_tick,
|
||||
2, divisor);
|
||||
} else if (old_divisor > 2 && divisor == 1) {
|
||||
us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
|
||||
old_divisor, 2);
|
||||
us2e_transition(estar, new_bits, clock_tick,
|
||||
2, divisor);
|
||||
} else if (old_divisor < divisor) {
|
||||
frob_mem_refresh(0, clock_tick, old_divisor, divisor);
|
||||
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
|
||||
} else if (old_divisor > divisor) {
|
||||
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
|
||||
frob_mem_refresh(1, clock_tick, old_divisor, divisor);
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static unsigned long index_to_estar_mode(unsigned int index)
|
||||
{
|
||||
switch (index) {
|
||||
case 0:
|
||||
return ESTAR_MODE_DIV_1;
|
||||
|
||||
case 1:
|
||||
return ESTAR_MODE_DIV_2;
|
||||
|
||||
case 2:
|
||||
return ESTAR_MODE_DIV_4;
|
||||
|
||||
case 3:
|
||||
return ESTAR_MODE_DIV_6;
|
||||
|
||||
case 4:
|
||||
return ESTAR_MODE_DIV_8;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long index_to_divisor(unsigned int index)
|
||||
{
|
||||
switch (index) {
|
||||
case 0:
|
||||
return 1;
|
||||
|
||||
case 1:
|
||||
return 2;
|
||||
|
||||
case 2:
|
||||
return 4;
|
||||
|
||||
case 3:
|
||||
return 6;
|
||||
|
||||
case 4:
|
||||
return 8;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long estar_to_divisor(unsigned long estar)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
switch (estar & ESTAR_MODE_DIV_MASK) {
|
||||
case ESTAR_MODE_DIV_1:
|
||||
ret = 1;
|
||||
break;
|
||||
case ESTAR_MODE_DIV_2:
|
||||
ret = 2;
|
||||
break;
|
||||
case ESTAR_MODE_DIV_4:
|
||||
ret = 4;
|
||||
break;
|
||||
case ESTAR_MODE_DIV_6:
|
||||
ret = 6;
|
||||
break;
|
||||
case ESTAR_MODE_DIV_8:
|
||||
ret = 8;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static unsigned int us2e_freq_get(unsigned int cpu)
|
||||
{
|
||||
cpumask_t cpus_allowed;
|
||||
unsigned long clock_tick, estar;
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return 0;
|
||||
|
||||
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
||||
|
||||
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
|
||||
estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
|
||||
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
|
||||
return clock_tick / estar_to_divisor(estar);
|
||||
}
|
||||
|
||||
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
|
||||
{
|
||||
unsigned long new_bits, new_freq;
|
||||
unsigned long clock_tick, divisor, old_divisor, estar;
|
||||
cpumask_t cpus_allowed;
|
||||
struct cpufreq_freqs freqs;
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return;
|
||||
|
||||
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
||||
|
||||
new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
|
||||
new_bits = index_to_estar_mode(index);
|
||||
divisor = index_to_divisor(index);
|
||||
new_freq /= divisor;
|
||||
|
||||
estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
|
||||
|
||||
old_divisor = estar_to_divisor(estar);
|
||||
|
||||
freqs.old = clock_tick / old_divisor;
|
||||
freqs.new = new_freq;
|
||||
freqs.cpu = cpu;
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
if (old_divisor != divisor)
|
||||
us2e_transition(estar, new_bits, clock_tick * 1000,
|
||||
old_divisor, divisor);
|
||||
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
|
||||
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
}
|
||||
|
||||
static int us2e_freq_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation)
|
||||
{
|
||||
unsigned int new_index = 0;
|
||||
|
||||
if (cpufreq_frequency_table_target(policy,
|
||||
&us2e_freq_table[policy->cpu].table[0],
|
||||
target_freq, relation, &new_index))
|
||||
return -EINVAL;
|
||||
|
||||
us2e_set_cpu_divider_index(policy->cpu, new_index);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int us2e_freq_verify(struct cpufreq_policy *policy)
|
||||
{
|
||||
return cpufreq_frequency_table_verify(policy,
|
||||
&us2e_freq_table[policy->cpu].table[0]);
|
||||
}
|
||||
|
||||
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
unsigned int cpu = policy->cpu;
|
||||
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
|
||||
struct cpufreq_frequency_table *table =
|
||||
&us2e_freq_table[cpu].table[0];
|
||||
|
||||
table[0].index = 0;
|
||||
table[0].frequency = clock_tick / 1;
|
||||
table[1].index = 1;
|
||||
table[1].frequency = clock_tick / 2;
|
||||
table[2].index = 2;
|
||||
table[2].frequency = clock_tick / 4;
|
||||
table[2].index = 3;
|
||||
table[2].frequency = clock_tick / 6;
|
||||
table[2].index = 4;
|
||||
table[2].frequency = clock_tick / 8;
|
||||
table[2].index = 5;
|
||||
table[3].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
policy->cpuinfo.transition_latency = 0;
|
||||
policy->cur = clock_tick;
|
||||
|
||||
return cpufreq_frequency_table_cpuinfo(policy, table);
|
||||
}
|
||||
|
||||
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
if (cpufreq_us2e_driver)
|
||||
us2e_set_cpu_divider_index(policy->cpu, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init us2e_freq_init(void)
|
||||
{
|
||||
unsigned long manuf, impl, ver;
|
||||
int ret;
|
||||
|
||||
if (tlb_type != spitfire)
|
||||
return -ENODEV;
|
||||
|
||||
__asm__("rdpr %%ver, %0" : "=r" (ver));
|
||||
manuf = ((ver >> 48) & 0xffff);
|
||||
impl = ((ver >> 32) & 0xffff);
|
||||
|
||||
if (manuf == 0x17 && impl == 0x13) {
|
||||
struct cpufreq_driver *driver;
|
||||
|
||||
ret = -ENOMEM;
|
||||
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
|
||||
if (!driver)
|
||||
goto err_out;
|
||||
|
||||
us2e_freq_table = kzalloc(
|
||||
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
|
||||
GFP_KERNEL);
|
||||
if (!us2e_freq_table)
|
||||
goto err_out;
|
||||
|
||||
driver->init = us2e_freq_cpu_init;
|
||||
driver->verify = us2e_freq_verify;
|
||||
driver->target = us2e_freq_target;
|
||||
driver->get = us2e_freq_get;
|
||||
driver->exit = us2e_freq_cpu_exit;
|
||||
driver->owner = THIS_MODULE,
|
||||
strcpy(driver->name, "UltraSPARC-IIe");
|
||||
|
||||
cpufreq_us2e_driver = driver;
|
||||
ret = cpufreq_register_driver(driver);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
if (driver) {
|
||||
kfree(driver);
|
||||
cpufreq_us2e_driver = NULL;
|
||||
}
|
||||
kfree(us2e_freq_table);
|
||||
us2e_freq_table = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static void __exit us2e_freq_exit(void)
|
||||
{
|
||||
if (cpufreq_us2e_driver) {
|
||||
cpufreq_unregister_driver(cpufreq_us2e_driver);
|
||||
kfree(cpufreq_us2e_driver);
|
||||
cpufreq_us2e_driver = NULL;
|
||||
kfree(us2e_freq_table);
|
||||
us2e_freq_table = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
|
||||
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
module_init(us2e_freq_init);
|
||||
module_exit(us2e_freq_exit);
|
@@ -1,274 +0,0 @@
|
||||
/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
|
||||
*
|
||||
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
|
||||
*
|
||||
* Many thanks to Dominik Brodowski for fixing up the cpufreq
|
||||
* infrastructure in order to make this driver easier to implement.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/head.h>
|
||||
#include <asm/timer.h>
|
||||
|
||||
static struct cpufreq_driver *cpufreq_us3_driver;
|
||||
|
||||
struct us3_freq_percpu_info {
|
||||
struct cpufreq_frequency_table table[4];
|
||||
};
|
||||
|
||||
/* Indexed by cpu number. */
|
||||
static struct us3_freq_percpu_info *us3_freq_table;
|
||||
|
||||
/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
|
||||
* in the Safari config register.
|
||||
*/
|
||||
#define SAFARI_CFG_DIV_1 0x0000000000000000UL
|
||||
#define SAFARI_CFG_DIV_2 0x0000000040000000UL
|
||||
#define SAFARI_CFG_DIV_32 0x0000000080000000UL
|
||||
#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
|
||||
|
||||
static unsigned long read_safari_cfg(void)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
__asm__ __volatile__("ldxa [%%g0] %1, %0"
|
||||
: "=&r" (ret)
|
||||
: "i" (ASI_SAFARI_CONFIG));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void write_safari_cfg(unsigned long val)
|
||||
{
|
||||
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
|
||||
"membar #Sync"
|
||||
: /* no outputs */
|
||||
: "r" (val), "i" (ASI_SAFARI_CONFIG)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
|
||||
{
|
||||
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
|
||||
unsigned long ret;
|
||||
|
||||
switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
|
||||
case SAFARI_CFG_DIV_1:
|
||||
ret = clock_tick / 1;
|
||||
break;
|
||||
case SAFARI_CFG_DIV_2:
|
||||
ret = clock_tick / 2;
|
||||
break;
|
||||
case SAFARI_CFG_DIV_32:
|
||||
ret = clock_tick / 32;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static unsigned int us3_freq_get(unsigned int cpu)
|
||||
{
|
||||
cpumask_t cpus_allowed;
|
||||
unsigned long reg;
|
||||
unsigned int ret;
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return 0;
|
||||
|
||||
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
||||
|
||||
reg = read_safari_cfg();
|
||||
ret = get_current_freq(cpu, reg);
|
||||
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
|
||||
{
|
||||
unsigned long new_bits, new_freq, reg;
|
||||
cpumask_t cpus_allowed;
|
||||
struct cpufreq_freqs freqs;
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return;
|
||||
|
||||
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
||||
|
||||
new_freq = sparc64_get_clock_tick(cpu) / 1000;
|
||||
switch (index) {
|
||||
case 0:
|
||||
new_bits = SAFARI_CFG_DIV_1;
|
||||
new_freq /= 1;
|
||||
break;
|
||||
case 1:
|
||||
new_bits = SAFARI_CFG_DIV_2;
|
||||
new_freq /= 2;
|
||||
break;
|
||||
case 2:
|
||||
new_bits = SAFARI_CFG_DIV_32;
|
||||
new_freq /= 32;
|
||||
break;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
reg = read_safari_cfg();
|
||||
|
||||
freqs.old = get_current_freq(cpu, reg);
|
||||
freqs.new = new_freq;
|
||||
freqs.cpu = cpu;
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
reg &= ~SAFARI_CFG_DIV_MASK;
|
||||
reg |= new_bits;
|
||||
write_safari_cfg(reg);
|
||||
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
|
||||
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
}
|
||||
|
||||
static int us3_freq_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation)
|
||||
{
|
||||
unsigned int new_index = 0;
|
||||
|
||||
if (cpufreq_frequency_table_target(policy,
|
||||
&us3_freq_table[policy->cpu].table[0],
|
||||
target_freq,
|
||||
relation,
|
||||
&new_index))
|
||||
return -EINVAL;
|
||||
|
||||
us3_set_cpu_divider_index(policy->cpu, new_index);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int us3_freq_verify(struct cpufreq_policy *policy)
|
||||
{
|
||||
return cpufreq_frequency_table_verify(policy,
|
||||
&us3_freq_table[policy->cpu].table[0]);
|
||||
}
|
||||
|
||||
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
unsigned int cpu = policy->cpu;
|
||||
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
|
||||
struct cpufreq_frequency_table *table =
|
||||
&us3_freq_table[cpu].table[0];
|
||||
|
||||
table[0].index = 0;
|
||||
table[0].frequency = clock_tick / 1;
|
||||
table[1].index = 1;
|
||||
table[1].frequency = clock_tick / 2;
|
||||
table[2].index = 2;
|
||||
table[2].frequency = clock_tick / 32;
|
||||
table[3].index = 0;
|
||||
table[3].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
policy->cpuinfo.transition_latency = 0;
|
||||
policy->cur = clock_tick;
|
||||
|
||||
return cpufreq_frequency_table_cpuinfo(policy, table);
|
||||
}
|
||||
|
||||
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
if (cpufreq_us3_driver)
|
||||
us3_set_cpu_divider_index(policy->cpu, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init us3_freq_init(void)
|
||||
{
|
||||
unsigned long manuf, impl, ver;
|
||||
int ret;
|
||||
|
||||
if (tlb_type != cheetah && tlb_type != cheetah_plus)
|
||||
return -ENODEV;
|
||||
|
||||
__asm__("rdpr %%ver, %0" : "=r" (ver));
|
||||
manuf = ((ver >> 48) & 0xffff);
|
||||
impl = ((ver >> 32) & 0xffff);
|
||||
|
||||
if (manuf == CHEETAH_MANUF &&
|
||||
(impl == CHEETAH_IMPL ||
|
||||
impl == CHEETAH_PLUS_IMPL ||
|
||||
impl == JAGUAR_IMPL ||
|
||||
impl == PANTHER_IMPL)) {
|
||||
struct cpufreq_driver *driver;
|
||||
|
||||
ret = -ENOMEM;
|
||||
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
|
||||
if (!driver)
|
||||
goto err_out;
|
||||
|
||||
us3_freq_table = kzalloc(
|
||||
(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
|
||||
GFP_KERNEL);
|
||||
if (!us3_freq_table)
|
||||
goto err_out;
|
||||
|
||||
driver->init = us3_freq_cpu_init;
|
||||
driver->verify = us3_freq_verify;
|
||||
driver->target = us3_freq_target;
|
||||
driver->get = us3_freq_get;
|
||||
driver->exit = us3_freq_cpu_exit;
|
||||
driver->owner = THIS_MODULE,
|
||||
strcpy(driver->name, "UltraSPARC-III");
|
||||
|
||||
cpufreq_us3_driver = driver;
|
||||
ret = cpufreq_register_driver(driver);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
if (driver) {
|
||||
kfree(driver);
|
||||
cpufreq_us3_driver = NULL;
|
||||
}
|
||||
kfree(us3_freq_table);
|
||||
us3_freq_table = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static void __exit us3_freq_exit(void)
|
||||
{
|
||||
if (cpufreq_us3_driver) {
|
||||
cpufreq_unregister_driver(cpufreq_us3_driver);
|
||||
kfree(cpufreq_us3_driver);
|
||||
cpufreq_us3_driver = NULL;
|
||||
kfree(us3_freq_table);
|
||||
us3_freq_table = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
|
||||
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
module_init(us3_freq_init);
|
||||
module_exit(us3_freq_exit);
|
@@ -342,6 +342,7 @@ static void vio_remove(struct mdesc_handle *hp, u64 node)
|
||||
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
|
||||
|
||||
device_unregister(dev);
|
||||
put_device(dev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -45,4 +45,3 @@ obj-y += iomap.o
|
||||
obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
|
||||
obj-y += ksyms.o
|
||||
obj-$(CONFIG_SPARC64) += PeeCeeI.o
|
||||
obj-y += usercopy.o
|
||||
|
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
|
||||
|
||||
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
|
||||
{
|
||||
|
||||
if ((size & 07) != 0)
|
||||
BUG();
|
||||
memset(map, 0, size>>3);
|
||||
|
||||
bitmap_zero(map, size);
|
||||
memset(t, 0, sizeof *t);
|
||||
spin_lock_init(&t->lock);
|
||||
t->map = map;
|
||||
|
@@ -1,9 +0,0 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/bug.h>
|
||||
|
||||
void copy_from_user_overflow(void)
|
||||
{
|
||||
WARN(1, "Buffer overflow detected!\n");
|
||||
}
|
||||
EXPORT_SYMBOL(copy_from_user_overflow);
|
@@ -282,14 +282,8 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
|
||||
printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
|
||||
#endif
|
||||
|
||||
for (tmp = start_pfn; tmp < end_pfn; tmp++) {
|
||||
struct page *page = pfn_to_page(tmp);
|
||||
|
||||
ClearPageReserved(page);
|
||||
init_page_count(page);
|
||||
__free_page(page);
|
||||
totalhigh_pages++;
|
||||
}
|
||||
for (tmp = start_pfn; tmp < end_pfn; tmp++)
|
||||
free_highmem_page(pfn_to_page(tmp));
|
||||
}
|
||||
|
||||
void __init mem_init(void)
|
||||
@@ -347,8 +341,6 @@ void __init mem_init(void)
|
||||
map_high_region(start_pfn, end_pfn);
|
||||
}
|
||||
|
||||
totalram_pages += totalhigh_pages;
|
||||
|
||||
codepages = (((unsigned long) &_etext) - ((unsigned long)&_start));
|
||||
codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
|
||||
datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext));
|
||||
|
@@ -2178,10 +2178,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE];
|
||||
static long __meminitdata addr_start, addr_end;
|
||||
static int __meminitdata node_start;
|
||||
|
||||
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
|
||||
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
|
||||
int node)
|
||||
{
|
||||
unsigned long vstart = (unsigned long) start;
|
||||
unsigned long vend = (unsigned long) (start + nr);
|
||||
unsigned long phys_start = (vstart - VMEMMAP_BASE);
|
||||
unsigned long phys_end = (vend - VMEMMAP_BASE);
|
||||
unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
|
||||
@@ -2233,7 +2232,7 @@ void __meminit vmemmap_populate_print_last(void)
|
||||
}
|
||||
}
|
||||
|
||||
void vmemmap_free(struct page *memmap, unsigned long nr_pages)
|
||||
void vmemmap_free(unsigned long start, unsigned long end)
|
||||
{
|
||||
}
|
||||
|
||||
|
@@ -34,7 +34,7 @@
|
||||
#define IOMMU_RNGE IOMMU_RNGE_256MB
|
||||
#define IOMMU_START 0xF0000000
|
||||
#define IOMMU_WINSIZE (256*1024*1024U)
|
||||
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
|
||||
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
|
||||
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
|
||||
|
||||
/* srmmu.c */
|
||||
|
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
|
||||
SRMMU_NOCACHE_ALIGN_MAX, 0UL);
|
||||
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
|
||||
|
||||
srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
|
||||
srmmu_nocache_bitmap =
|
||||
__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
|
||||
SMP_CACHE_BYTES, 0UL);
|
||||
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
|
||||
|
||||
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
|
||||
|
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
|
||||
void flush_tlb_pending(void)
|
||||
{
|
||||
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
|
||||
struct mm_struct *mm = tb->mm;
|
||||
|
||||
if (tb->tlb_nr) {
|
||||
flush_tsb_user(tb);
|
||||
if (!tb->tlb_nr)
|
||||
goto out;
|
||||
|
||||
if (CTX_VALID(tb->mm->context)) {
|
||||
flush_tsb_user(tb);
|
||||
|
||||
if (CTX_VALID(mm->context)) {
|
||||
if (tb->tlb_nr == 1) {
|
||||
global_flush_tlb_page(mm, tb->vaddrs[0]);
|
||||
} else {
|
||||
#ifdef CONFIG_SMP
|
||||
smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
|
||||
&tb->vaddrs[0]);
|
||||
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
|
||||
tb->tlb_nr, &tb->vaddrs[0]);
|
||||
#endif
|
||||
}
|
||||
tb->tlb_nr = 0;
|
||||
}
|
||||
|
||||
tb->tlb_nr = 0;
|
||||
|
||||
out:
|
||||
put_cpu_var(tlb_batch);
|
||||
}
|
||||
|
||||
void arch_enter_lazy_mmu_mode(void)
|
||||
{
|
||||
struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
|
||||
|
||||
tb->active = 1;
|
||||
}
|
||||
|
||||
void arch_leave_lazy_mmu_mode(void)
|
||||
{
|
||||
struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
|
||||
|
||||
if (tb->tlb_nr)
|
||||
flush_tlb_pending();
|
||||
tb->active = 0;
|
||||
}
|
||||
|
||||
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
|
||||
bool exec)
|
||||
{
|
||||
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
|
||||
nr = 0;
|
||||
}
|
||||
|
||||
if (!tb->active) {
|
||||
global_flush_tlb_page(mm, vaddr);
|
||||
flush_tsb_user_page(mm, vaddr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (nr == 0)
|
||||
tb->mm = mm;
|
||||
|
||||
@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
|
||||
if (nr >= TLB_BATCH_NR)
|
||||
flush_tlb_pending();
|
||||
|
||||
out:
|
||||
put_cpu_var(tlb_batch);
|
||||
}
|
||||
|
||||
|
@@ -7,11 +7,10 @@
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tsb.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/oplib.h>
|
||||
|
||||
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
|
||||
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
|
||||
}
|
||||
}
|
||||
|
||||
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
|
||||
unsigned long hash_shift,
|
||||
unsigned long nentries)
|
||||
{
|
||||
unsigned long tag, ent, hash;
|
||||
|
||||
v &= ~0x1UL;
|
||||
hash = tsb_hash(v, hash_shift, nentries);
|
||||
ent = tsb + (hash * sizeof(struct tsb));
|
||||
tag = (v >> 22UL);
|
||||
|
||||
tsb_flush(ent, tag);
|
||||
}
|
||||
|
||||
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
|
||||
unsigned long tsb, unsigned long nentries)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < tb->tlb_nr; i++) {
|
||||
unsigned long v = tb->vaddrs[i];
|
||||
unsigned long tag, ent, hash;
|
||||
|
||||
v &= ~0x1UL;
|
||||
|
||||
hash = tsb_hash(v, hash_shift, nentries);
|
||||
ent = tsb + (hash * sizeof(struct tsb));
|
||||
tag = (v >> 22UL);
|
||||
|
||||
tsb_flush(ent, tag);
|
||||
}
|
||||
for (i = 0; i < tb->tlb_nr; i++)
|
||||
__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
|
||||
}
|
||||
|
||||
void flush_tsb_user(struct tlb_batch *tb)
|
||||
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
|
||||
spin_unlock_irqrestore(&mm->context.lock, flags);
|
||||
}
|
||||
|
||||
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
|
||||
{
|
||||
unsigned long nentries, base, flags;
|
||||
|
||||
spin_lock_irqsave(&mm->context.lock, flags);
|
||||
|
||||
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
|
||||
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
|
||||
base = __pa(base);
|
||||
__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
|
||||
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
|
||||
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
|
||||
base = __pa(base);
|
||||
__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
|
||||
}
|
||||
#endif
|
||||
spin_unlock_irqrestore(&mm->context.lock, flags);
|
||||
}
|
||||
|
||||
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
|
||||
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
|
||||
|
||||
|
@@ -52,6 +52,33 @@ __flush_tlb_mm: /* 18 insns */
|
||||
nop
|
||||
nop
|
||||
|
||||
.align 32
|
||||
.globl __flush_tlb_page
|
||||
__flush_tlb_page: /* 22 insns */
|
||||
/* %o0 = context, %o1 = vaddr */
|
||||
rdpr %pstate, %g7
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, %pstate
|
||||
mov SECONDARY_CONTEXT, %o4
|
||||
ldxa [%o4] ASI_DMMU, %g2
|
||||
stxa %o0, [%o4] ASI_DMMU
|
||||
andcc %o1, 1, %g0
|
||||
andn %o1, 1, %o3
|
||||
be,pn %icc, 1f
|
||||
or %o3, 0x10, %o3
|
||||
stxa %g0, [%o3] ASI_IMMU_DEMAP
|
||||
1: stxa %g0, [%o3] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
stxa %g2, [%o4] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o4
|
||||
flush %o4
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
.align 32
|
||||
.globl __flush_tlb_pending
|
||||
__flush_tlb_pending: /* 26 insns */
|
||||
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
|
||||
__cheetah_flush_tlb_page: /* 22 insns */
|
||||
/* %o0 = context, %o1 = vaddr */
|
||||
rdpr %pstate, %g7
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, 0x0, %pstate
|
||||
wrpr %g0, 1, %tl
|
||||
mov PRIMARY_CONTEXT, %o4
|
||||
ldxa [%o4] ASI_DMMU, %g2
|
||||
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
|
||||
sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
|
||||
or %o0, %o3, %o0 /* Preserve nucleus page size fields */
|
||||
stxa %o0, [%o4] ASI_DMMU
|
||||
andcc %o1, 1, %g0
|
||||
be,pn %icc, 1f
|
||||
andn %o1, 1, %o3
|
||||
stxa %g0, [%o3] ASI_IMMU_DEMAP
|
||||
1: stxa %g0, [%o3] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
stxa %g2, [%o4] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o4
|
||||
flush %o4
|
||||
wrpr %g0, 0, %tl
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
|
||||
__cheetah_flush_tlb_pending: /* 27 insns */
|
||||
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
|
||||
rdpr %pstate, %g7
|
||||
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
|
||||
retl
|
||||
nop
|
||||
|
||||
__hypervisor_flush_tlb_page: /* 11 insns */
|
||||
/* %o0 = context, %o1 = vaddr */
|
||||
mov %o0, %g2
|
||||
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
|
||||
mov %g2, %o1 /* ARG1: mmu context */
|
||||
mov HV_MMU_ALL, %o2 /* ARG2: flags */
|
||||
srlx %o0, PAGE_SHIFT, %o0
|
||||
sllx %o0, PAGE_SHIFT, %o0
|
||||
ta HV_MMU_UNMAP_ADDR_TRAP
|
||||
brnz,pn %o0, __hypervisor_tlb_tl0_error
|
||||
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
|
||||
retl
|
||||
nop
|
||||
|
||||
__hypervisor_flush_tlb_pending: /* 16 insns */
|
||||
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
|
||||
sllx %o1, 3, %g1
|
||||
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
|
||||
call tlb_patch_one
|
||||
mov 19, %o2
|
||||
|
||||
sethi %hi(__flush_tlb_page), %o0
|
||||
or %o0, %lo(__flush_tlb_page), %o0
|
||||
sethi %hi(__cheetah_flush_tlb_page), %o1
|
||||
or %o1, %lo(__cheetah_flush_tlb_page), %o1
|
||||
call tlb_patch_one
|
||||
mov 22, %o2
|
||||
|
||||
sethi %hi(__flush_tlb_pending), %o0
|
||||
or %o0, %lo(__flush_tlb_pending), %o0
|
||||
sethi %hi(__cheetah_flush_tlb_pending), %o1
|
||||
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
|
||||
nop
|
||||
nop
|
||||
|
||||
.globl xcall_flush_tlb_pending
|
||||
xcall_flush_tlb_pending: /* 21 insns */
|
||||
/* %g5=context, %g1=nr, %g7=vaddrs[] */
|
||||
sllx %g1, 3, %g1
|
||||
.globl xcall_flush_tlb_page
|
||||
xcall_flush_tlb_page: /* 17 insns */
|
||||
/* %g5=context, %g1=vaddr */
|
||||
mov PRIMARY_CONTEXT, %g4
|
||||
ldxa [%g4] ASI_DMMU, %g2
|
||||
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
|
||||
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
|
||||
or %g5, %g4, %g5
|
||||
mov PRIMARY_CONTEXT, %g4
|
||||
stxa %g5, [%g4] ASI_DMMU
|
||||
1: sub %g1, (1 << 3), %g1
|
||||
ldx [%g7 + %g1], %g5
|
||||
andcc %g5, 0x1, %g0
|
||||
andcc %g1, 0x1, %g0
|
||||
be,pn %icc, 2f
|
||||
|
||||
andn %g5, 0x1, %g5
|
||||
andn %g1, 0x1, %g5
|
||||
stxa %g0, [%g5] ASI_IMMU_DEMAP
|
||||
2: stxa %g0, [%g5] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
brnz,pt %g1, 1b
|
||||
nop
|
||||
stxa %g2, [%g4] ASI_DMMU
|
||||
retry
|
||||
nop
|
||||
nop
|
||||
|
||||
.globl xcall_flush_tlb_kernel_range
|
||||
xcall_flush_tlb_kernel_range: /* 25 insns */
|
||||
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
|
||||
membar #Sync
|
||||
retry
|
||||
|
||||
.globl __hypervisor_xcall_flush_tlb_pending
|
||||
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
|
||||
/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
|
||||
sllx %g1, 3, %g1
|
||||
.globl __hypervisor_xcall_flush_tlb_page
|
||||
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
|
||||
/* %g5=ctx, %g1=vaddr */
|
||||
mov %o0, %g2
|
||||
mov %o1, %g3
|
||||
mov %o2, %g4
|
||||
1: sub %g1, (1 << 3), %g1
|
||||
ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
|
||||
mov %g1, %o0 /* ARG0: virtual address */
|
||||
mov %g5, %o1 /* ARG1: mmu context */
|
||||
mov HV_MMU_ALL, %o2 /* ARG2: flags */
|
||||
srlx %o0, PAGE_SHIFT, %o0
|
||||
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
|
||||
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
|
||||
brnz,a,pn %o0, __hypervisor_tlb_xcall_error
|
||||
mov %o0, %g5
|
||||
brnz,pt %g1, 1b
|
||||
nop
|
||||
mov %g2, %o0
|
||||
mov %g3, %o1
|
||||
mov %g4, %o2
|
||||
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
|
||||
call tlb_patch_one
|
||||
mov 10, %o2
|
||||
|
||||
sethi %hi(__flush_tlb_page), %o0
|
||||
or %o0, %lo(__flush_tlb_page), %o0
|
||||
sethi %hi(__hypervisor_flush_tlb_page), %o1
|
||||
or %o1, %lo(__hypervisor_flush_tlb_page), %o1
|
||||
call tlb_patch_one
|
||||
mov 11, %o2
|
||||
|
||||
sethi %hi(__flush_tlb_pending), %o0
|
||||
or %o0, %lo(__flush_tlb_pending), %o0
|
||||
sethi %hi(__hypervisor_flush_tlb_pending), %o1
|
||||
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
|
||||
call tlb_patch_one
|
||||
mov 21, %o2
|
||||
|
||||
sethi %hi(xcall_flush_tlb_pending), %o0
|
||||
or %o0, %lo(xcall_flush_tlb_pending), %o0
|
||||
sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
|
||||
or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
|
||||
sethi %hi(xcall_flush_tlb_page), %o0
|
||||
or %o0, %lo(xcall_flush_tlb_page), %o0
|
||||
sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
|
||||
or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
|
||||
call tlb_patch_one
|
||||
mov 21, %o2
|
||||
mov 17, %o2
|
||||
|
||||
sethi %hi(xcall_flush_tlb_kernel_range), %o0
|
||||
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
|
||||
|
@@ -795,13 +795,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
|
||||
}
|
||||
|
||||
if (bpf_jit_enable > 1)
|
||||
pr_err("flen=%d proglen=%u pass=%d image=%p\n",
|
||||
flen, proglen, pass, image);
|
||||
bpf_jit_dump(flen, proglen, pass, image);
|
||||
|
||||
if (image) {
|
||||
if (bpf_jit_enable > 1)
|
||||
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
|
||||
16, 1, image, proglen, false);
|
||||
bpf_flush_icache(image, image + proglen);
|
||||
fp->bpf_func = (void *)image;
|
||||
}
|
||||
|