tile: remove support for TILE64
This chip is no longer being actively developed for (it was superseded by the TILEPro64 in 2008), and in any case the existing compiler and toolchain in the community do not support it. It's unlikely that the kernel still works with TILE64 at this point, as the configuration has not been tested in years. The support is also awkward to carry, as it requires maintaining a significant number of ifdefs. So, just remove it altogether.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
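For context on the "significant number of ifdefs": per-chip features are exposed as compile-time CHIP_HAS_*() macros from <arch/chip.h>, each expanding to a constant 0 or 1 for the chip being built, and shared code had to branch on them at every use. A minimal sketch of the pattern being retired (the helper and its body are illustrative, not code from the tree):

        #include <arch/chip.h>  /* CHIP_HAS_WH64() expands to 0 or 1 per chip */

        /* Hypothetical helper: make a cache line cheap to overwrite. */
        static inline void prep_line_for_write(void *p)
        {
        #if CHIP_HAS_WH64()
                __insn_wh64(p);         /* claim the line; no fetch of old contents */
        #else
                (void)p;                /* TILE64 had no wh64; take the fetch */
        #endif
        }

With TILE64 gone, every TILEPro-vs-TILE64 test of this kind collapses to a constant, and the #if/#else pairs can simply be deleted, which is most of what the diff below does.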
@@ -32,12 +32,6 @@
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
 
-#if !CHIP_HAS_WH64()
-        /* By making this an empty macro, we can use wh64 in the code. */
-        .macro wh64 reg
-        .endm
-#endif
-
         .macro push_reg reg, ptr=sp, delta=-4
         {
          sw     \ptr, \reg
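The block removed above is the other half of the pattern: on TILE64, which lacked the wh64 (write-hint) instruction, wh64 was defined as an empty assembler macro so shared code could issue "wh64 r52" unconditionally. The same no-op trick in C, for comparison (hypothetical macro name):

        /* No-op fallback so call sites need no #ifdefs (sketch). */
        #if CHIP_HAS_WH64()
        #define wh64_line(p)    __insn_wh64(p)
        #else
        #define wh64_line(p)    do { (void)(p); } while (0)  /* TILE64: nothing to do */
        #endif

Now that only chips with wh64 remain, the empty-macro shim is dead weight and goes away.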
@@ -325,18 +319,14 @@ intvec_\vecname:
          movei  r3, -1   /* not used, but set for consistency */
         }
         .else
-#if CHIP_HAS_AUX_PERF_COUNTERS()
         .ifc \c_routine, op_handle_aux_perf_interrupt
         {
          mfspr  r2, AUX_PERF_COUNT_STS
          movei  r3, -1   /* not used, but set for consistency */
         }
         .else
-#endif
         movei  r3, 0
-#if CHIP_HAS_AUX_PERF_COUNTERS()
         .endif
-#endif
         .endif
         .endif
         .endif
@@ -561,7 +551,6 @@ intvec_\vecname:
         .endif
         mtspr  INTERRUPT_CRITICAL_SECTION, zero
 
-#if CHIP_HAS_WH64()
         /*
          * Prepare the first 256 stack bytes to be rapidly accessible
          * without having to fetch the background data. We don't really
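The comment this hunk uncovers describes cache-line priming: the interrupt path claims the first 256 bytes of the kernel stack with wh64, one 64-byte line at a time (the "addi r52, r52, -64" / "wh64 r52" loop visible in the next hunk), so register saves never stall fetching stale background data. Roughly, in C (a sketch; assumes 64-byte lines and the wh64 intrinsic):

        /* Prime the top 256 bytes of the stack: one wh64 per line.
         * (Sketch only; the real code runs in asm in the vector.) */
        static inline void prime_stack(unsigned long sp)
        {
                char *line = (char *)(sp & -64UL);      /* line-align */
                int i;

                for (i = 0; i < 4; i++)
                        __insn_wh64(line - i * 64);     /* stack grows down */
        }

Since the hunk only deletes the now-always-true #if CHIP_HAS_WH64() guard, this priming now runs unconditionally.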
@@ -582,7 +571,6 @@ intvec_\vecname:
          addi   r52, r52, -64
         }
         wh64   r52
-#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
         .ifnc \function,handle_nmi
@@ -1533,12 +1521,10 @@ STD_ENTRY(_sys_clone)
         __HEAD
         .align 64
         /* Align much later jump on the start of a cache line. */
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
         nop
 #if PAGE_SIZE >= 0x10000
         nop
 #endif
-#endif
 ENTRY(sys_cmpxchg)
 
         /*
@@ -1572,45 +1558,6 @@ ENTRY(sys_cmpxchg)
 # error Code here assumes PAGE_OFFSET can be loaded with just hi16()
 #endif
 
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-        {
-         /* Check for unaligned input. */
-         bnz    sp, .Lcmpxchg_badaddr
-         mm     r25, r0, zero, 3, PAGE_SHIFT-1
-        }
-        {
-         crc32_32 r25, zero, r25
-         moveli r21, lo16(atomic_lock_ptr)
-        }
-        {
-         auli   r21, r21, ha16(atomic_lock_ptr)
-         auli   r23, zero, hi16(PAGE_OFFSET)    /* hugepage-aligned */
-        }
-        {
-         shri   r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
-         slt_u  r23, r0, r23
-         lw     r26, r0 /* see comment in the "#else" for the "lw r26". */
-        }
-        {
-         s2a    r21, r20, r21
-         bbns   r23, .Lcmpxchg_badaddr
-        }
-        {
-         lw     r21, r21
-         seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
-         andi   r25, r25, ATOMIC_HASH_L2_SIZE - 1
-        }
-        {
-         /* Branch away at this point if we're doing a 64-bit cmpxchg. */
-         bbs    r23, .Lcmpxchg64
-         andi   r23, r0, 7 /* Precompute alignment for cmpxchg64. */
-        }
-        {
-         s2a    ATOMIC_LOCK_REG_NAME, r25, r21
-         j      .Lcmpxchg32_tns /* see comment in the #else for the jump. */
-        }
-
-#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
         {
          /* Check for unaligned input. */
          bnz    sp, .Lcmpxchg_badaddr
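For the record, the TILE64-only block deleted here implemented a hashed lock table for atomic emulation: hash the page offset of the user address with crc32_32, index the first-level atomic_lock_ptr table, then pick a lock out of a second-level array. The tile atomic support code carried the same lookup in C; roughly (names taken from the asm above, masking simplified):

        /* Rough C model of the deleted lookup (simplified). */
        extern int *atomic_lock_ptr[];  /* L1 table -> arrays of locks */

        static int *atomic_hashed_lock(unsigned long uaddr)
        {
                /* hash the (8-byte-aligned) offset within the page */
                unsigned long off = uaddr & ((PAGE_SIZE - 1) & -8UL);
                unsigned long hash = __insn_crc32_32(0, off);
                int *bucket = atomic_lock_ptr[hash >> (32 - ATOMIC_HASH_L1_SHIFT)];

                return &bucket[hash & (ATOMIC_HASH_L2_SIZE - 1)];
        }

TILEPro computes the lock address directly from the user address (the surviving branch), so the table walk, and its ifdefs, disappear.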
@@ -1635,12 +1582,9 @@ ENTRY(sys_cmpxchg)
 
         /*
          * Ensure that the TLB is loaded before we take out the lock.
-         * On tilepro, this will start fetching the value all the way
-         * into our L1 as well (and if it gets modified before we
-         * grab the lock, it will be invalidated from our cache
-         * before we reload it). On tile64, we'll start fetching it
-         * into our L1 if we're the home, and if we're not, we'll
-         * still at least start fetching it into the home's L2.
+         * This will start fetching the value all the way into our L1
+         * as well (and if it gets modified before we grab the lock,
+         * it will be invalidated from our cache before we reload it).
          */
         lw     r26, r0
         }
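The rewritten comment keeps the reason for the "lw r26, r0" that precedes it: touch the user word before taking the lock so the TLB fill and the cache fetch happen outside the critical section. The same idiom in C (sketch):

        /* Dummy read: warm the TLB and start the cache fill before
         * the lock is held; the loaded value is deliberately unused. */
        static inline void preload_word(const volatile int *addr)
        {
                (void)*addr;
        }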
@@ -1683,8 +1627,6 @@ ENTRY(sys_cmpxchg)
          j      .Lcmpxchg32_tns
         }
 
-#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
 /* Symbol for do_page_fault_ics() to use to compare against the PC. */
         .global __sys_cmpxchg_grab_lock
 __sys_cmpxchg_grab_lock:
@@ -1822,9 +1764,6 @@ __sys_cmpxchg_grab_lock:
         .align 64
 .Lcmpxchg64:
         {
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-         s2a    ATOMIC_LOCK_REG_NAME, r25, r21
-#endif
          bzt    r23, .Lcmpxchg64_tns
         }
         j      .Lcmpxchg_badaddr
@@ -1959,10 +1898,8 @@ int_unalign:
                      do_page_fault
         int_hand     INT_SN_CPL, SN_CPL, bad_intr
         int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
-#if CHIP_HAS_AUX_PERF_COUNTERS()
         int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
                      op_handle_aux_perf_interrupt, handle_nmi
-#endif
 
         /* Synthetic interrupt delivered only by the simulator */
         int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint
@@ -511,12 +511,10 @@ intvec_\vecname:
         .else
         .ifc \c_routine, op_handle_perf_interrupt
         mfspr  r2, PERF_COUNT_STS
-#if CHIP_HAS_AUX_PERF_COUNTERS()
         .else
         .ifc \c_routine, op_handle_aux_perf_interrupt
         mfspr  r2, AUX_PERF_COUNT_STS
         .endif
-#endif
         .endif
         .endif
         .endif
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 /*
  * The interrupt handling path, implemented in terms of HV interrupt
- * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
+ * emulation on TILEPro, and IPI hardware on TILE-Gx.
  * Entered with interrupts disabled.
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
@@ -235,7 +235,7 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type)
 {
         /*
          * We use handle_level_irq() by default because the pending
-         * interrupt vector (whether modeled by the HV on TILE64 and
+         * interrupt vector (whether modeled by the HV on
          * TILEPro or implemented in hardware on TILE-Gx) has
          * level-style semantics for each bit. An interrupt fires
          * whenever a bit is high, not just at edges.
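The comment adjusted here explains why tile registers handle_level_irq: each bit of the pending-interrupt word acts like a level-triggered line, re-firing as long as it stays high. A sketch of that kind of registration against the generic IRQ layer (the chip struct name is assumed, not quoted from this diff):

        #include <linux/irq.h>

        /* Level-style flow handler registration (sketch). */
        static void example_irq_setup(unsigned int irq)
        {
                irq_set_chip_and_handler(irq, &tile_irq_chip,
                                         handle_level_irq);
        }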
@@ -187,16 +187,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
         memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
 #endif
 
-#if CHIP_HAS_SN_PROC()
-        /* Likewise, the new thread is not running static processor code. */
-        p->thread.sn_proc_running = 0;
-        memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
-#endif
-
-#if CHIP_HAS_PROC_STATUS_SPR()
         /* New thread has its miscellaneous processor state bits clear. */
         p->thread.proc_status = 0;
-#endif
 
 #ifdef CONFIG_HARDWALL
         /* New thread does not own any networks. */
@@ -378,15 +370,11 @@ static void save_arch_state(struct thread_struct *t)
         t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
         t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
         t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
-#if CHIP_HAS_PROC_STATUS_SPR()
         t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
-#endif
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
         t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
 #endif
-#if CHIP_HAS_TILE_RTF_HWM()
         t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
-#endif
 #if CHIP_HAS_DSTREAM_PF()
         t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
 #endif
@@ -407,15 +395,11 @@ static void restore_arch_state(const struct thread_struct *t)
         __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
         __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
         __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
-#if CHIP_HAS_PROC_STATUS_SPR()
         __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
-#endif
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
         __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
 #endif
-#if CHIP_HAS_TILE_RTF_HWM()
         __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
-#endif
 #if CHIP_HAS_DSTREAM_PF()
         __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
 #endif
@@ -424,26 +408,11 @@ static void restore_arch_state(const struct thread_struct *t)
 
 void _prepare_arch_switch(struct task_struct *next)
 {
-#if CHIP_HAS_SN_PROC()
-        int snctl;
-#endif
 #if CHIP_HAS_TILE_DMA()
         struct tile_dma_state *dma = &current->thread.tile_dma_state;
         if (dma->enabled)
                 save_tile_dma_state(dma);
 #endif
-#if CHIP_HAS_SN_PROC()
-        /*
-         * Suspend the static network processor if it was running.
-         * We do not suspend the fabric itself, just like we don't
-         * try to suspend the UDN.
-         */
-        snctl = __insn_mfspr(SPR_SNCTL);
-        current->thread.sn_proc_running =
-                (snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
-        if (current->thread.sn_proc_running)
-                __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
-#endif
 }
@@ -471,17 +440,6 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
         /* Restore other arch state. */
         restore_arch_state(&next->thread);
 
-#if CHIP_HAS_SN_PROC()
-        /*
-         * Restart static network processor in the new process
-         * if it was running before.
-         */
-        if (next->thread.sn_proc_running) {
-                int snctl = __insn_mfspr(SPR_SNCTL);
-                __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
-        }
-#endif
-
 #ifdef CONFIG_HARDWALL
         /* Enable or disable access to the network registers appropriately. */
         hardwall_switch_tasks(prev, next);
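Taken together, the two blocks deleted from _prepare_arch_switch() and _switch_to() formed a freeze/thaw pair around context switch: record whether the static network processor was live, freeze it on switch-out, and clear the freeze bit again on switch-in. Condensed, the deleted logic was (sketch, for the record):

        /* Condensed form of the deleted SN freeze/thaw pair (sketch). */
        static void sn_freeze(struct thread_struct *t)
        {
                int snctl = __insn_mfspr(SPR_SNCTL);

                t->sn_proc_running = (snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
                if (t->sn_proc_running)
                        __insn_mtspr(SPR_SNCTL,
                                     snctl | SPR_SNCTL__FRZPROC_MASK);
        }

        static void sn_thaw(const struct thread_struct *t)
        {
                if (t->sn_proc_running)
                        __insn_mtspr(SPR_SNCTL,
                                     __insn_mfspr(SPR_SNCTL) &
                                     ~SPR_SNCTL__FRZPROC_MASK);
        }

Only TILE64 had the SN processor (CHIP_HAS_SN_PROC()), so the whole protocol goes with it.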
@@ -523,7 +481,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
                 schedule();
                 return 1;
         }
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
+#if CHIP_HAS_TILE_DMA()
         if (thread_info_flags & _TIF_ASYNC_TLB) {
                 do_async_page_fault(regs);
                 return 1;
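For orientation, do_work_pending() runs on the return-to-user path, handles one pending item per call, and returns 1 to make the caller re-check the flags; with the SN processor gone, the async-TLB case is reached only for the DMA engine. Its shape after this change, condensed (sketch; the rescheduling flag test sits just above the hunk and is inferred):

        /* Condensed shape of do_work_pending() after the change (sketch). */
        int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
        {
                if (thread_info_flags & _TIF_NEED_RESCHED) {
                        schedule();
                        return 1;       /* caller re-checks the flags */
                }
        #if CHIP_HAS_TILE_DMA()
                if (thread_info_flags & _TIF_ASYNC_TLB) {
                        do_async_page_fault(regs);
                        return 1;
                }
        #endif
                return 0;               /* nothing left to do */
        }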
@@ -77,7 +77,6 @@ STD_ENTRY(relocate_new_kernel)
         move   r30, sp
         addi   sp, sp, -8
 
-#if CHIP_HAS_CBOX_HOME_MAP()
         /*
          * On TILEPro, we need to flush all tiles' caches, since we may
          * have been doing hash-for-home caching there. Note that we
@@ -113,7 +112,6 @@ STD_ENTRY(relocate_new_kernel)
         }
 
         jalr   r20
-#endif
 
         /* r33 is destination pointer, default to zero */
@@ -78,7 +78,6 @@ STD_ENTRY(relocate_new_kernel)
         move   r30, sp
         addi   sp, sp, -16
 
-#if CHIP_HAS_CBOX_HOME_MAP()
         /*
          * On TILE-GX, we need to flush all tiles' caches, since we may
          * have been doing hash-for-home caching there. Note that we
@@ -116,7 +115,6 @@ STD_ENTRY(relocate_new_kernel)
         shl16insli r20, r20, hw0(hv_flush_remote)
 
         jalr   r20
-#endif
 
         /* r33 is destination pointer, default to zero */
@@ -1046,9 +1046,6 @@ void __cpuinit setup_cpu(int boot)
         arch_local_irq_unmask(INT_DMATLB_MISS);
         arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
-#if CHIP_HAS_SN_PROC()
-        arch_local_irq_unmask(INT_SNITLB_MISS);
-#endif
 #ifdef __tilegx__
         arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
@@ -1063,10 +1060,6 @@ void __cpuinit setup_cpu(int boot)
         /* Static network is not restricted. */
         __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
 #endif
-#if CHIP_HAS_SN_PROC()
-        __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
-        __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
-#endif
 
         /*
          * Set the MPL for interrupt control 0 & 1 to the corresponding
@@ -1291,7 +1284,6 @@ static void __init validate_va(void)
 struct cpumask __write_once cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /*
  * hash_for_home_map lists all the tiles that hash-for-home data
  * will be cached on. Note that this may includes tiles that are not
@@ -1301,7 +1293,6 @@ EXPORT_SYMBOL(cpu_lotar_map);
  */
 struct cpumask hash_for_home_map;
 EXPORT_SYMBOL(hash_for_home_map);
-#endif
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
@@ -1394,7 +1385,6 @@ static void __init setup_cpu_maps(void)
                 cpu_lotar_map = *cpu_possible_mask;
         }
 
-#if CHIP_HAS_CBOX_HOME_MAP()
         /* Retrieve set of CPUs used for hash-for-home caching */
         rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
                               (HV_VirtAddr) hash_for_home_map.bits,
@@ -1402,9 +1392,6 @@ static void __init setup_cpu_maps(void)
         if (rc < 0)
                 early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
         cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-#else
-        cpu_cacheable_map = *cpu_possible_mask;
-#endif
 }
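After this change the kernel always asks the hypervisor for the hash-for-home tile set and folds it into cpu_cacheable_map; the #else fallback for chips without hash-for-home caching (i.e. TILE64) is gone. The retained pattern in isolation (sketch; the size argument is assumed, since the hunk truncates the call):

        /* Query the HV for a cpumask, then union it in (sketch). */
        rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
                              (HV_VirtAddr) hash_for_home_map.bits,
                              sizeof(hash_for_home_map));       /* assumed */
        if (rc < 0)
                early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
        cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);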
@@ -546,7 +546,6 @@ void single_step_once(struct pt_regs *regs)
                 }
                 break;
 
-#if CHIP_HAS_WH64()
         /* postincrement operations */
         case IMM_0_OPCODE_X1:
                 switch (get_ImmOpcodeExtension_X1(bundle)) {
@@ -581,7 +580,6 @@ void single_step_once(struct pt_regs *regs)
                         break;
                 }
                 break;
-#endif /* CHIP_HAS_WH64() */
         }
 
         if (state->update) {
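The decode made unconditional by these last two hunks handles postincrement memory operations in the single-step engine: it keys off the X1 pipe's IMM_0 opcode and then its immediate-opcode extension. Schematically (sketch; the specific extension names inside the inner switch are not quoted from this hunk):

        /* Schematic of the now-unconditional postincrement decode. */
        switch (get_Opcode_X1(bundle)) {
        case IMM_0_OPCODE_X1:
                switch (get_ImmOpcodeExtension_X1(bundle)) {
                /* postincrement load/store encodings handled here */
                default:
                        break;
                }
                break;
        default:
                break;
        }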