Merge branch 'linus' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -34,7 +34,9 @@ config PARISC
 	select HAVE_ARCH_HASH
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
+	select GENERIC_SCHED_CLOCK
+	select HAVE_UNSTABLE_SCHED_CLOCK if SMP
 	select GENERIC_CLOCKEVENTS
 	select ARCH_NO_COHERENT_DMA_MMAP
 	select CPU_NO_EFFICIENT_FFS
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags; \
 		spin_lock_irqsave(&pa_tlb_lock, flags); \
 		old_pte = *ptep; \
-		set_pte(ptep, pteval); \
 		if (pte_inserted(old_pte)) \
 			purge_tlb_entries(mm, addr); \
+		set_pte(ptep, pteval); \
 		spin_unlock_irqrestore(&pa_tlb_lock, flags); \
 	} while (0)
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
 	return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }

@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
 {
 	unsigned long rangetime, alltime;
 	unsigned long size, start;
+	unsigned long threshold;
 
 	alltime = mfctl(16);
 	flush_data_cache();
@@ -382,26 +383,30 @@ void __init parisc_setup_cache_timing(void)
 	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
 		alltime, size, rangetime);
 
-	/* Racy, but if we see an intermediate value, it's ok too... */
-	parisc_cache_flush_threshold = size * alltime / rangetime;
-
-	parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
-	if (!parisc_cache_flush_threshold)
-		parisc_cache_flush_threshold = FLUSH_THRESHOLD;
-
-	if (parisc_cache_flush_threshold > cache_info.dc_size)
-		parisc_cache_flush_threshold = cache_info.dc_size;
-
-	printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+	if (threshold > cache_info.dc_size)
+		threshold = cache_info.dc_size;
+	if (threshold)
+		parisc_cache_flush_threshold = threshold;
+	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
 		parisc_cache_flush_threshold/1024);
 
 	/* calculate TLB flush threshold */
 
+	/* On SMP machines, skip the TLB measure of kernel text which
+	 * has been mapped as huge pages. */
+	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
+		threshold = max(cache_info.it_size, cache_info.dt_size);
+		threshold *= PAGE_SIZE;
+		threshold /= num_online_cpus();
+		goto set_tlb_threshold;
+	}
+
 	alltime = mfctl(16);
 	flush_tlb_all();
 	alltime = mfctl(16) - alltime;
 
-	size = PAGE_SIZE;
+	size = 0;
 	start = (unsigned long) _text;
 	rangetime = mfctl(16);
 	while (start < (unsigned long) _end) {
@@ -414,13 +419,12 @@ void __init parisc_setup_cache_timing(void)
 	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
 		alltime, size, rangetime);
 
-	parisc_tlb_flush_threshold = size * alltime / rangetime;
-	parisc_tlb_flush_threshold *= num_online_cpus();
-	parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
-	if (!parisc_tlb_flush_threshold)
-		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
 
-	printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+set_tlb_threshold:
+	if (threshold)
+		parisc_tlb_flush_threshold = threshold;
+	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
 		parisc_tlb_flush_threshold/1024);
 }

@@ -58,7 +58,7 @@ void __init setup_pdc(void)
 	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
 	if (status == PDC_OK) {
 		pdc_type = PDC_TYPE_SYSTEM_MAP;
-		printk("System Map.\n");
+		pr_cont("System Map.\n");
 		return;
 	}
 
@@ -77,7 +77,7 @@ void __init setup_pdc(void)
 	status = pdc_pat_cell_get_number(&cell_info);
 	if (status == PDC_OK) {
 		pdc_type = PDC_TYPE_PAT;
-		printk("64 bit PAT.\n");
+		pr_cont("64 bit PAT.\n");
 		return;
 	}
 #endif
@@ -97,12 +97,12 @@ void __init setup_pdc(void)
 	case 0xC:		/* 715/64, at least */
 
 		pdc_type = PDC_TYPE_SNAKE;
-		printk("Snake.\n");
+		pr_cont("Snake.\n");
 		return;
 
 	default:		/* Everything else */
 
-		printk("Unsupported.\n");
+		pr_cont("Unsupported.\n");
 		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
 	}
 }

@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
 
 fitmanymiddle:	/* Loop if LOOP >= 2 */
 	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
-	pitlbe		0(%sr1, %r28)
+	pitlbe		%r0(%sr1, %r28)
 	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
 	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
 	copy		%arg3, %r31		/* Re-init inner loop count */
@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
 
 fdtmanymiddle:	/* Loop if LOOP >= 2 */
 	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
-	pdtlbe		0(%sr1, %r28)
+	pdtlbe		%r0(%sr1, %r28)
 	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
 	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
 	copy		%arg3, %r31		/* Re-init inner loop count */
@@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm)
 	/* Purge any old translations */
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r28)
-	pdtlb,l		0(%r29)
+	pdtlb,l		%r0(%r28)
+	pdtlb,l		%r0(%r29)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r28)
-	pdtlb		0(%r29)
+	pdtlb		%r0(%r28)
+	pdtlb		%r0(%r29)
 	tlb_unlock	%r20,%r21,%r22
 #endif
 
@@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm)
 	/* Purge any old translation */
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r28)
+	pdtlb,l		%r0(%r28)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r28)
+	pdtlb		%r0(%r28)
 	tlb_unlock	%r20,%r21,%r22
 #endif
 
@@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm)
 	/* Purge any old translation */
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r28)
+	pdtlb,l		%r0(%r28)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r28)
+	pdtlb		%r0(%r28)
 	tlb_unlock	%r20,%r21,%r22
 #endif
 
@@ -892,19 +892,10 @@ ENTRY_CFI(flush_dcache_page_asm)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
-	cmpb,COND(<<)		%r28, %r25,1b
+	cmpb,COND(<<)	%r28, %r25,1b
 	fdc,m		r31(%r28)
 
 	sync
-
-#ifdef CONFIG_PA20
-	pdtlb,l		0(%r25)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r25)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	bv		%r0(%r2)
 	nop
 	.exit
@@ -931,13 +922,18 @@ ENTRY_CFI(flush_icache_page_asm)
 	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif
 
-	/* Purge any old translation */
+	/* Purge any old translation. Note that the FIC instruction
+	 * may use either the instruction or data TLB. Given that we
+	 * have a flat address space, it's not clear which TLB will be
+	 * used. So, we purge both entries. */
 
 #ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
 	pitlb,l		%r0(%sr4,%r28)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pitlb		(%sr4,%r28)
+	pdtlb		%r0(%r28)
+	pitlb		%r0(%sr4,%r28)
 	tlb_unlock	%r20,%r21,%r22
 #endif
 
@@ -974,15 +970,6 @@ ENTRY_CFI(flush_icache_page_asm)
 	fic,m		%r31(%sr4,%r28)
 
 	sync
-
-#ifdef CONFIG_PA20
-	pitlb,l		%r0(%sr4,%r25)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pitlb		(%sr4,%r25)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	bv		%r0(%r2)
 	nop
 	.exit

@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
 
 		if (!pte_none(*pte))
 			printk(KERN_ERR "map_pte_uncached: page already exists\n");
-		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
 		purge_tlb_start(flags);
+		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
 		pdtlb_kernel(orig_vaddr);
 		purge_tlb_end(flags);
 		vaddr += PAGE_SIZE;

@@ -334,6 +334,10 @@ static int __init parisc_init(void)
 	/* tell PDC we're Linux. Nevermind failure. */
 	pdc_stable_write(0x40, &osid, sizeof(osid));
 
+	/* start with known state */
+	flush_cache_all_local();
+	flush_tlb_all_local(NULL);
+
 	processor_init();
 #ifdef CONFIG_SMP
 	pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",

@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/rtc.h>
 #include <linux/sched.h>
+#include <linux/sched_clock.h>
 #include <linux/kernel.h>
 #include <linux/param.h>
 #include <linux/string.h>
@@ -39,18 +40,6 @@
 
 static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
-#ifndef CONFIG_64BIT
-/*
- * The processor-internal cycle counter (Control Register 16) is used as time
- * source for the sched_clock() function. This register is 64bit wide on a
- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
- * with a per-cpu variable which we increase every time the counter
- * wraps-around (which happens every ~4 secounds).
- */
-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
-#endif
-
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 */
 	mtctl(next_tick, 16);
 
-#if !defined(CONFIG_64BIT)
-	/* check for overflow on a 32bit kernel (every ~4 seconds). */
-	if (unlikely(next_tick < now))
-		this_cpu_inc(cr16_high_32_bits);
-#endif
-
 	/* Skip one clocktick on purpose if we missed next_tick.
 	 * The new CR16 must be "later" than current CR16 otherwise
 	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t read_cr16(struct clocksource *cs)
+static cycle_t notrace read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
 }
 
-
-/*
- * sched_clock() framework
- */
-
-static u32 cyc2ns_mul __read_mostly;
-static u32 cyc2ns_shift __read_mostly;
-
-u64 sched_clock(void)
+static u64 notrace read_cr16_sched_clock(void)
 {
-	u64 now;
-
-	/* Get current cycle counter (Control Register 16). */
-#ifdef CONFIG_64BIT
-	now = mfctl(16);
-#else
-	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
-#endif
-
-	/* return the value in ns (cycles_2_ns) */
-	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+	return get_cycles();
 }
 
@@ -316,17 +282,16 @@ u64 sched_clock(void)
 
 void __init time_init(void)
 {
-	unsigned long current_cr16_khz;
+	unsigned long cr16_hz;
 
-	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
-
-	/* calculate mult/shift values for cr16 */
-	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
-				NSEC_PER_MSEC, 0);
-
 	start_cpu_itimer();	/* get CPU 0 started */
 
+	cr16_hz = 100 * PAGE0->mem_10msec;	/* Hz */
+
 	/* register at clocksource framework */
-	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
+	clocksource_register_hz(&clocksource_cr16, cr16_hz);
+
+	/* register as sched_clock source */
+	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }