Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
commit 1da177e4c3
Author: Linus Torvalds
Date: 2005-04-16 15:20:36 -07:00
17291 changed files with 6718755 additions and 0 deletions

arch/parisc/kernel/Makefile

@@ -0,0 +1,24 @@
#
# Makefile for arch/parisc/kernel
#
extra-y := init_task.o head.o vmlinux.lds
AFLAGS_entry.o := -traditional
AFLAGS_pacache.o := -traditional
CFLAGS_ioctl32.o := -Ifs/
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o semaphore.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
topology.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o ioctl32.o signal32.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o
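(Kbuild note: the obj-$(CONFIG_FOO) idiom expands to obj-y when the option is built in, obj-m when it is modular, and to nothing when it is unset, so e.g. smp.o is only compiled when CONFIG_SMP is enabled.)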

arch/parisc/kernel/asm-offsets.c

@@ -0,0 +1,299 @@
/*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/version.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/uaccess.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
#ifdef __LP64__
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64
#endif
#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
int main(void)
{
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
BLANK();
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(TASK_SZ, sizeof(struct task_struct));
DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
BLANK();
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
BLANK();
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
BLANK();
DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT);
DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT);
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
return 0;
}
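A note on the DEFINE() trick above: this file is never linked into the kernel. It is only compiled to assembly, and the build extracts the "->" marker lines into a header of #define constants that entry.S and the other assembly sources can include. A minimal self-contained sketch of the mechanism (struct and symbol names are invented for illustration; this is not the kernel's actual build rule):

/* offsets_demo.c: build with `gcc -S offsets_demo.c` and inspect
 * offsets_demo.s. Nothing here executes; the assembly text is the
 * product. Only the mechanism matches the kernel file above. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo {
	long flags;
	long regs[32];
};

int main(void)
{
	/* Each DEFINE emits a "->NAME <value> <expression>" line
	 * directly into the generated .s file. */
	DEFINE(DEMO_FLAGS, offsetof(struct demo, flags));
	DEFINE(DEMO_REGS, offsetof(struct demo, regs));
	return 0;
}

The generated .s file then contains lines such as "-> DEMO_REGS $8 offsetof(struct demo, regs)" (the exact immediate syntax varies by target), which a small sed pass in the build rewrites into "#define DEMO_REGS 8".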

arch/parisc/kernel/binfmt_elf32.c

@@ -0,0 +1,126 @@
/*
* Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels
*
* Copyright (C) 2000 John Marvin
* Copyright (C) 2000 Hewlett Packard Co.
*
* Heavily inspired from various other efforts to do the same thing
* (ia64,sparc64/mips64)
*/
/* Make sure include/asm-parisc/elf.h does the right thing */
#define ELF_CLASS ELFCLASS32
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
{ int i; \
for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
} \
dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
typedef unsigned int elf_greg_t;
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h> /* struct compat_timeval */
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
u16 pr_uid;
u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_addr_t unsigned int
#define init_elf_binfmt init_elf32_binfmt
#define ELF_PLATFORM ("PARISC32\0")
/*
* We should probably use this macro to set a flag somewhere to indicate
* this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
* could set a processor dependent flag in the thread_struct.
*/
#define SET_PERSONALITY(ex, ibcs2) \
current->personality = PER_LINUX32; \
current->thread.map_base = DEFAULT_MAP_BASE32; \
current->thread.task_size = DEFAULT_TASK_SIZE32
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
#include "../../../fs/binfmt_elf.c"
/* Set up a separate execution domain for ELF32 binaries running
* on an ELF64 kernel */
static struct exec_domain parisc32_exec_domain = {
.name = "Linux/ELF32",
.pers_low = PER_LINUX32,
.pers_high = PER_LINUX32,
};
static int __init parisc32_exec_init(void)
{
/* steal the identity signal mappings from the default domain */
parisc32_exec_domain.signal_map = default_exec_domain.signal_map;
parisc32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
register_exec_domain(&parisc32_exec_domain);
return 0;
}
__initcall(parisc32_exec_init);
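The closing `#include "../../../fs/binfmt_elf.c"` is the heart of this file: rather than duplicating the ELF loader, it redefines a handful of types and macros (elf_prstatus, ELF_CLASS, SET_PERSONALITY, ...) and then compiles the generic loader a second time as a 32-bit compat variant. A toy sketch of the same compile-time specialization pattern, with invented file and symbol names:

/* generic_sum.c: a "shared" implementation whose width is chosen
 * by whoever includes it, defaulting to the native type. */
#ifndef SUM_T
#define SUM_T unsigned long
#endif

SUM_T sum_bytes(const unsigned char *p, unsigned int n)
{
	SUM_T s = 0;	/* accumulator width depends on SUM_T */
	while (n--)
		s += *p++;
	return s;
}

/* sum32.c: override the knobs, then reuse the generic code verbatim,
 * just as binfmt_elf32.c does with fs/binfmt_elf.c. */
#define SUM_T unsigned int
#define sum_bytes sum_bytes32
#include "generic_sum.c"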

arch/parisc/kernel/cache.c

@@ -0,0 +1,366 @@
/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 Helge Deller (07-13-1999)
* Copyright (C) 1999 SuSE GmbH Nuernberg
* Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
*
* Cache and TLB management
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
int split_tlb;
int dcache_stride;
int icache_stride;
EXPORT_SYMBOL(dcache_stride);
#if defined(CONFIG_SMP)
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We put a spinlock around all TLB flushes to
* ensure this.
*/
DEFINE_SPINLOCK(pa_tlb_lock);
EXPORT_SYMBOL(pa_tlb_lock);
#endif
struct pdc_cache_info cache_info;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info;
#endif
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
void
flush_instruction_cache(void)
{
on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1);
}
#endif
void
flush_cache_all_local(void)
{
flush_instruction_cache_local();
flush_data_cache_local();
}
EXPORT_SYMBOL(flush_cache_all_local);
/* flushes EVERYTHING (tlb & cache) */
void
flush_all_caches(void)
{
flush_cache_all();
flush_tlb_all();
}
EXPORT_SYMBOL(flush_all_caches);
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page = pte_page(pte);
if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page(page_address(page));
clear_bit(PG_dcache_dirty, &page->flags);
}
}
void
show_cache_info(struct seq_file *m)
{
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %d-way associative)\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
(cache_info.dc_conf.cc_assoc)
);
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
cache_info.it_size,
cache_info.dt_size,
cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
);
#ifndef CONFIG_PA20
/* BTLB - Block TLB */
if (btlb_info.max_size==0) {
seq_printf(m, "BTLB\t\t: not supported\n" );
} else {
seq_printf(m,
"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
btlb_info.max_size, (int)4096,
btlb_info.max_size>>8,
btlb_info.fixed_range_info.num_i,
btlb_info.fixed_range_info.num_d,
btlb_info.fixed_range_info.num_comb,
btlb_info.variable_range_info.num_i,
btlb_info.variable_range_info.num_d,
btlb_info.variable_range_info.num_comb
);
}
#endif
}
void __init
parisc_cache_init(void)
{
if (pdc_cache_info(&cache_info) < 0)
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
printk("ic_size %lx dc_size %lx it_size %lx\n",
cache_info.ic_size,
cache_info.dc_size,
cache_info.it_size);
printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
cache_info.dc_base,
cache_info.dc_stride,
cache_info.dc_count,
cache_info.dc_loop);
printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
*(unsigned long *) (&cache_info.dc_conf),
cache_info.dc_conf.cc_alias,
cache_info.dc_conf.cc_block,
cache_info.dc_conf.cc_line,
cache_info.dc_conf.cc_shift);
printk(" wt %d sh %d cst %d assoc %d\n",
cache_info.dc_conf.cc_wt,
cache_info.dc_conf.cc_sh,
cache_info.dc_conf.cc_cst,
cache_info.dc_conf.cc_assoc);
printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
cache_info.ic_base,
cache_info.ic_stride,
cache_info.ic_count,
cache_info.ic_loop);
printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
*(unsigned long *) (&cache_info.ic_conf),
cache_info.ic_conf.cc_alias,
cache_info.ic_conf.cc_block,
cache_info.ic_conf.cc_line,
cache_info.ic_conf.cc_shift);
printk(" wt %d sh %d cst %d assoc %d\n",
cache_info.ic_conf.cc_wt,
cache_info.ic_conf.cc_sh,
cache_info.ic_conf.cc_cst,
cache_info.ic_conf.cc_assoc);
printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
cache_info.dt_conf.tc_sh,
cache_info.dt_conf.tc_page,
cache_info.dt_conf.tc_cst,
cache_info.dt_conf.tc_aid,
cache_info.dt_conf.tc_pad1);
printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
cache_info.it_conf.tc_sh,
cache_info.it_conf.tc_page,
cache_info.it_conf.tc_cst,
cache_info.it_conf.tc_aid,
cache_info.it_conf.tc_pad1);
#endif
split_tlb = 0;
if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
if (cache_info.dt_conf.tc_sh == 2)
printk(KERN_WARNING "Unexpected TLB configuration. "
"Will flush I/D separately (could be optimized).\n");
split_tlb = 1;
}
/* "New and Improved" version from Jim Hull
* (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
*/
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
#ifndef CONFIG_PA20
if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
#endif
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
}
}
void disable_sr_hashing(void)
{
int srhash_type;
switch (boot_cpu_data.cpu_type) {
case pcx: /* We shouldn't get this far. setup.c should prevent it. */
BUG();
return;
case pcxs:
case pcxt:
case pcxt_:
srhash_type = SRHASH_PCXST;
break;
case pcxl:
srhash_type = SRHASH_PCXL;
break;
case pcxl2: /* pcxl2 doesn't support space register hashing */
return;
default: /* Currently all PA2.0 machines use the same ins. sequence */
srhash_type = SRHASH_PA20;
break;
}
disable_sr_hashing_asm(srhash_type);
}
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
unsigned long offset;
unsigned long addr;
pgoff_t pgoff;
pte_t *pte;
unsigned long pfn = page_to_pfn(page);
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
return;
}
flush_kernel_dcache_page(page_address(page));
if (!mapping)
return;
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
/* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */
flush_dcache_mmap_lock(mapping);
vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
/* Flush instructions produce non access tlb misses.
* On PA, we nullify these instructions rather than
* taking a page fault if the pte doesn't exist.
* This is just for speed. If the page translation
* isn't there, there's no point exciting the
* nadtlb handler into a nullification frenzy */
if(!(pte = translation_exists(mpnt, addr)))
continue;
/* make sure we really have this page: the private
* mappings may cover this area but have COW'd this
* particular page */
if(pte_pfn(*pte) != pfn)
continue;
__flush_cache_page(mpnt, addr);
break;
}
flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
void clear_user_page_asm(void *page, unsigned long vaddr)
{
/* This function is implemented in assembly in pacache.S */
extern void __clear_user_page_asm(void *page, unsigned long vaddr);
purge_tlb_start();
__clear_user_page_asm(page, vaddr);
purge_tlb_end();
}
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold = FLUSH_THRESHOLD;
void parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
extern char _text; /* start of kernel code, defined by linker */
extern char _end; /* end of BSS, defined by linker */
unsigned long size;
alltime = mfctl(16);
flush_data_cache();
alltime = mfctl(16) - alltime;
size = (unsigned long)(&_end - &_text);
rangetime = mfctl(16);
flush_kernel_dcache_range((unsigned long)&_text, size);
rangetime = mfctl(16) - rangetime;
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
/* Racy, but if we see an intermediate value, it's ok too... */
parisc_cache_flush_threshold = size * alltime / rangetime;
parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
if (!parisc_cache_flush_threshold)
parisc_cache_flush_threshold = FLUSH_THRESHOLD;
printk("Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
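(Worked example of the threshold formula above: if flushing the entire data cache costs alltime = 100000 cycles while flushing the size = 2 MB of kernel text costs rangetime = 400000 cycles, then parisc_cache_flush_threshold becomes 2 MB * 100000 / 400000 = 0.5 MB, rounded up to an L1_CACHE_BYTES boundary; anything larger is cheaper to handle as a full cache flush.)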

arch/parisc/kernel/drivers.c

@@ -0,0 +1,765 @@
/*
* drivers.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 1999 The Puffin Group
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
* Copyright (c) 2001 Helge Deller <deller@gmx.de>
* Copyright (c) 2001,2002 Ryan Bradetich
* Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
*
* The file handles registering devices and drivers, then matching them.
* It's the closest we get to a dating agency.
*
* If you're thinking about modifying this file, here are some gotchas to
* bear in mind:
* - 715/Mirage device paths have a dummy device between Lasi and its children
* - The EISA adapter may show up as a sibling or child of Wax
* - Dino has an optionally functional serial port. If firmware enables it,
* it shows up as a child of Dino. If firmware disables it, the buswalk
* finds it and it shows up as a child of Cujo
* - Dino has both parisc and pci devices as children
* - parisc devices are discovered in a random order, including children
* before parents in some cases.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */
struct hppa_dma_ops *hppa_dma_ops;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
.bus_id = "parisc",
};
#define for_each_padev(padev) \
for (padev = next_dev(&root); padev != NULL; \
padev = next_dev(&padev->dev))
#define check_dev(padev) \
(padev->id.hw_type != HPHW_FAULTY) ? padev : next_dev(&padev->dev)
/**
* next_dev - enumerates registered devices
* @dev: the previous device returned from next_dev
*
* next_dev does a depth-first search of the tree, returning parents
* before children. Returns NULL when there are no more devices.
*/
static struct parisc_device *next_dev(struct device *dev)
{
if (!list_empty(&dev->children)) {
dev = list_to_dev(dev->children.next);
return check_dev(to_parisc_device(dev));
}
while (dev != &root) {
if (dev->node.next != &dev->parent->children) {
dev = list_to_dev(dev->node.next);
return to_parisc_device(dev);
}
dev = dev->parent;
}
return NULL;
}
/**
* match_device - Report whether this driver can handle this device
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
for (ids = driver->id_table; ids->sversion; ids++) {
if ((ids->sversion != SVERSION_ANY_ID) &&
(ids->sversion != dev->id.sversion))
continue;
if ((ids->hw_type != HWTYPE_ANY_ID) &&
(ids->hw_type != dev->id.hw_type))
continue;
if ((ids->hversion != HVERSION_ANY_ID) &&
(ids->hversion != dev->id.hversion))
continue;
return 1;
}
return 0;
}
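For context, the id_table walked above is an array of struct parisc_device_id terminated by an entry whose sversion is zero, which is why the loop condition tests ids->sversion. A hypothetical table (the sversion value is made up; the ANY_ID wildcards correspond to the checks in match_device()):

static const struct parisc_device_id example_ids[] = {
	/* hw_type, hversion_rev, hversion, sversion */
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00012 },
	{ 0, }	/* zero sversion terminates the table */
};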
static void claim_device(struct parisc_driver *driver, struct parisc_device *dev)
{
dev->driver = driver;
request_mem_region(dev->hpa, 0x1000, driver->name);
}
static int parisc_driver_probe(struct device *dev)
{
int rc;
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
rc = pa_drv->probe(pa_dev);
if(!rc)
claim_device(pa_drv, pa_dev);
return rc;
}
static int parisc_driver_remove(struct device *dev)
{
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
if (pa_drv->remove)
pa_drv->remove(pa_dev);
release_mem_region(pa_dev->hpa, 0x1000);
return 0;
}
/**
* register_parisc_driver - Register this driver if it can handle a device
* @driver: the PA-RISC driver to try
*/
int register_parisc_driver(struct parisc_driver *driver)
{
/* FIXME: we need this because apparently the sti
* driver can be registered twice */
if(driver->drv.name) {
printk(KERN_WARNING
"BUG: skipping previously registered driver %s\n",
driver->name);
return 1;
}
if (!driver->probe) {
printk(KERN_WARNING
"BUG: driver %s has no probe routine\n",
driver->name);
return 1;
}
driver->drv.bus = &parisc_bus_type;
/* We install our own probe and remove routines */
WARN_ON(driver->drv.probe != NULL);
WARN_ON(driver->drv.remove != NULL);
driver->drv.probe = parisc_driver_probe;
driver->drv.remove = parisc_driver_remove;
driver->drv.name = driver->name;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(register_parisc_driver);
/**
* count_parisc_driver - count # of devices this driver would match
* @driver: the PA-RISC driver to try
*
* Used by IOMMU support to "guess" the right size IOPdir.
* Formula is something like memsize/(num_iommu * entry_size).
*/
int count_parisc_driver(struct parisc_driver *driver)
{
struct parisc_device *device;
int cnt = 0;
for_each_padev(device) {
if (match_device(driver, device))
cnt++;
}
return cnt;
}
/**
* unregister_parisc_driver - Unregister this driver from the list of drivers
* @driver: the PA-RISC driver to unregister
*/
int unregister_parisc_driver(struct parisc_driver *driver)
{
driver_unregister(&driver->drv);
return 0;
}
EXPORT_SYMBOL(unregister_parisc_driver);
static struct parisc_device *find_device_by_addr(unsigned long hpa)
{
struct parisc_device *dev;
for_each_padev(dev) {
if (dev->hpa == hpa)
return dev;
}
return NULL;
}
/**
* find_pa_parent_type - Find a parent of a specific type
* @dev: The device to start searching from
* @type: The device type to search for.
*
* Walks up the device tree looking for a device of the specified type.
* If it finds it, it returns it. If not, it returns NULL.
*/
const struct parisc_device *
find_pa_parent_type(const struct parisc_device *padev, int type)
{
const struct device *dev = &padev->dev;
while (dev != &root) {
struct parisc_device *candidate = to_parisc_device(dev);
if (candidate->id.hw_type == type)
return candidate;
dev = dev->parent;
}
return NULL;
}
#ifdef CONFIG_PCI
static inline int is_pci_dev(struct device *dev)
{
return dev->bus == &pci_bus_type;
}
#else
static inline int is_pci_dev(struct device *dev)
{
return 0;
}
#endif
/*
* get_node_path fills in @path with the firmware path to the device.
* Note that if @node is a parisc device, we don't fill in the 'mod' field.
* This is because both callers pass the parent and fill in the mod
* themselves. If @node is a PCI device, we do fill it in, even though this
* is inconsistent.
*/
static void get_node_path(struct device *dev, struct hardware_path *path)
{
int i = 5;
memset(&path->bc, -1, 6);
if (is_pci_dev(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->mod = PCI_FUNC(devfn);
path->bc[i--] = PCI_SLOT(devfn);
dev = dev->parent;
}
while (dev != &root) {
if (is_pci_dev(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
} else if (dev->bus == &parisc_bus_type) {
path->bc[i--] = to_parisc_device(dev)->hw_path;
}
dev = dev->parent;
}
}
static char *print_hwpath(struct hardware_path *path, char *output)
{
int i;
for (i = 0; i < 6; i++) {
if (path->bc[i] == -1)
continue;
output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
}
output += sprintf(output, "%u", (unsigned char) path->mod);
return output;
}
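(Example: for path->bc = { -1, -1, -1, -1, 8, 0 } and path->mod = 4, print_hwpath() produces "8/0/4", the slash-separated hardware-path notation that PDC uses.)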
/**
* print_pa_hwpath - Returns hardware path for PA devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PA device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pa_hwpath(struct parisc_device *dev, char *output)
{
struct hardware_path path;
get_node_path(dev->dev.parent, &path);
path.mod = dev->hw_path;
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pa_hwpath);
#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
/**
* get_pci_node_path - Determines the hardware path for a PCI device
* @pdev: The device to return the path for
* @path: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the hardware_path structure with the route to
* the specified PCI device. This structure is suitable for passing to
* PDC calls.
*/
void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
{
get_node_path(&pdev->dev, path);
}
EXPORT_SYMBOL(get_pci_node_path);
/**
* print_pci_hwpath - Returns hardware path for PCI devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PCI device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pci_hwpath(struct pci_dev *dev, char *output)
{
struct hardware_path path;
get_pci_node_path(dev, &path);
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pci_hwpath);
#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
static void setup_bus_id(struct parisc_device *padev)
{
struct hardware_path path;
char *output = padev->dev.bus_id;
int i;
get_node_path(padev->dev.parent, &path);
for (i = 0; i < 6; i++) {
if (path.bc[i] == -1)
continue;
output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
}
sprintf(output, "%u", (unsigned char) padev->hw_path);
}
struct parisc_device * create_tree_node(char id, struct device *parent)
{
struct parisc_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
memset(dev, 0, sizeof(*dev));
dev->hw_path = id;
dev->id.hw_type = HPHW_FAULTY;
dev->dev.parent = parent;
setup_bus_id(dev);
dev->dev.bus = &parisc_bus_type;
dev->dma_mask = 0xffffffffUL; /* PARISC devices are 32-bit */
/* make the generic dma mask a pointer to the parisc one */
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.coherent_dma_mask = dev->dma_mask;
device_register(&dev->dev);
return dev;
}
/**
* alloc_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @id: the element of the module path for this entry
*
* Checks all the children of @parent for a matching @id. If none
* found, it allocates a new device and returns it.
*/
static struct parisc_device * alloc_tree_node(struct device *parent, char id)
{
struct device *dev;
list_for_each_entry(dev, &parent->children, node) {
struct parisc_device *padev = to_parisc_device(dev);
if (padev->hw_path == id)
return padev;
}
return create_tree_node(id, parent);
}
static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
}
return alloc_tree_node(parent, modpath->mod);
}
struct parisc_device *
alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
{
int status;
unsigned long bytecnt;
u8 iodc_data[32];
struct parisc_device *dev;
const char *name;
/* Check to make sure this device has not already been added - Ryan */
if (find_device_by_addr(hpa) != NULL)
return NULL;
status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
if (status != PDC_OK)
return NULL;
dev = create_parisc_device(mod_path);
if (dev->id.hw_type != HPHW_FAULTY) {
char p[64];
print_pa_hwpath(dev, p);
printk("Two devices have hardware path %s. Please file a bug with HP.\n"
"In the meantime, you could try rearranging your cards.\n", p);
return NULL;
}
dev->id.hw_type = iodc_data[3] & 0x1f;
dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
dev->id.hversion_rev = iodc_data[1] & 0x0f;
dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
(iodc_data[5] << 8) | iodc_data[6];
dev->hpa = hpa;
name = parisc_hardware_description(&dev->id);
if (name) {
strlcpy(dev->name, name, sizeof(dev->name));
}
return dev;
}
static int parisc_generic_match(struct device *dev, struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, char *buf) \
{ \
struct parisc_device *padev = to_parisc_device(dev); \
return sprintf(buf, format_string, padev->field); \
}
#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
pa_dev_attr(irq, irq, "%u\n");
pa_dev_attr_id(hw_type, "0x%02x\n");
pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
pa_dev_attr_id(hversion, "0x%03x\n");
pa_dev_attr_id(sversion, "0x%05x\n");
static struct device_attribute parisc_device_attrs[] = {
__ATTR_RO(irq),
__ATTR_RO(hw_type),
__ATTR_RO(rev),
__ATTR_RO(hversion),
__ATTR_RO(sversion),
__ATTR_NULL,
};
struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
.dev_attrs = parisc_device_attrs,
};
/**
* register_parisc_device - Locate a driver to manage this device.
* @dev: The parisc device.
*
* Search the driver list for a driver that is willing to manage
* this device.
*/
int register_parisc_device(struct parisc_device *dev)
{
if (!dev)
return 0;
if (dev->driver)
return 1;
return 0;
}
/**
* match_pci_device - Matches a pci device against a given hardware path
* entry.
* @dev: the generic device (known to be contained by a pci_dev).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_pci_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct pci_dev *pdev = to_pci_dev(dev);
int id;
if (index == 5) {
/* we are at the end of the path, and on the actual device */
unsigned int devfn = pdev->devfn;
return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
(modpath->mod == PCI_FUNC(devfn)));
}
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}
/**
* match_parisc_device - Matches a parisc device against a given hardware
* path entry.
* @dev: the generic device (known to be contained by a parisc_device).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_parisc_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct parisc_device *curr = to_parisc_device(dev);
char id = (index == 6) ? modpath->mod : modpath->bc[index];
return (curr->hw_path == id);
}
/**
* parse_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @index: the current BC index
* @modpath: the hardware_path struct to match a device against
* @return: The corresponding device if found, NULL otherwise.
*
* Checks all the children of @parent for a matching @id. If none
* found, it returns NULL.
*/
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
{
struct device *device;
list_for_each_entry(device, &parent->children, node) {
if (device->bus == &parisc_bus_type) {
if (match_parisc_device(device, index, modpath))
return device;
} else if (is_pci_dev(device)) {
if (match_pci_device(device, index, modpath))
return device;
} else if (device->bus == NULL) {
/* we are on a bus bridge */
struct device *new = parse_tree_node(device, index, modpath);
if (new)
return new;
}
}
return NULL;
}
/**
* hwpath_to_device - Finds the generic device corresponding to a given hardware path.
* @modpath: the hardware path.
* @return: The target device, NULL if not found.
*/
struct device *hwpath_to_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = parse_tree_node(parent, i, modpath);
if (!parent)
return NULL;
}
if (is_pci_dev(parent)) /* pci devices already parse MOD */
return parent;
else
return parse_tree_node(parent, 6, modpath);
}
EXPORT_SYMBOL(hwpath_to_device);
/**
* device_to_hwpath - Populates the hwpath corresponding to the given device.
* @param dev the target device
* @param path pointer to a previously allocated hwpath struct to be filled in
*/
void device_to_hwpath(struct device *dev, struct hardware_path *path)
{
struct parisc_device *padev;
if (dev->bus == &parisc_bus_type) {
padev = to_parisc_device(dev);
get_node_path(dev->parent, path);
path->mod = padev->hw_path;
} else if (is_pci_dev(dev)) {
get_node_path(dev, path);
}
}
EXPORT_SYMBOL(device_to_hwpath);
#define BC_PORT_MASK 0x8
#define BC_LOWER_PORT 0x8
#define BUS_CONVERTER(dev) \
((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
#define IS_LOWER_PORT(dev) \
((gsc_readl(dev->hpa + offsetof(struct bc_module, io_status)) \
& BC_PORT_MASK) == BC_LOWER_PORT)
#define MAX_NATIVE_DEVICES 64
#define NATIVE_DEVICE_OFFSET 0x1000
#define FLEX_MASK F_EXTEND(0xfffc0000)
#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa + IO_IO_LOW)
#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa + IO_IO_HIGH)
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
void walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;
if(!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
return;
if(dev->id.hw_type == HPHW_IOA) {
io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
} else {
io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
io_io_high = (READ_IO_IO_HIGH(dev)+ ~FLEX_MASK) & FLEX_MASK;
}
walk_native_bus(io_io_low, io_io_high, &dev->dev);
}
/**
* walk_native_bus -- Probe a bus for devices
* @io_io_low: Base address of this bus.
* @io_io_high: Last address of this bus.
* @parent: The parent bus device.
*
* A native bus (eg Runway or GSC) may have up to 64 devices on it,
* spaced at intervals of 0x1000 bytes. PDC may not inform us of these
* devices, so we have to probe for them. Unfortunately, we may find
* devices which are not physically connected (such as extra serial &
* keyboard ports). This problem is not yet solved.
*/
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent)
{
int i, devices_found = 0;
unsigned long hpa = io_io_low;
struct hardware_path path;
get_node_path(parent, &path);
do {
for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
struct parisc_device *dev;
/* Was the device already added by Firmware? */
dev = find_device_by_addr(hpa);
if (!dev) {
path.mod = i;
dev = alloc_pa_dev(hpa, &path);
if (!dev)
continue;
register_parisc_device(dev);
devices_found++;
}
walk_lower_bus(dev);
}
} while(!devices_found && hpa < io_io_high);
}
#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)
/**
* walk_central_bus - Find devices attached to the central bus
*
* PDC doesn't tell us about all devices in the system. This routine
* finds devices connected to the central bus.
*/
void walk_central_bus(void)
{
walk_native_bus(CENTRAL_BUS_ADDR,
CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
&root);
}
static void print_parisc_device(struct parisc_device *dev)
{
char hw_path[64];
static int count;
print_pa_hwpath(dev, hw_path);
printk(KERN_INFO "%d. %s at 0x%lx [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, dev->hpa, hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
if (dev->num_addrs) {
int k;
printk(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
printk("0x%lx ", dev->addr[k]);
}
printk("\n");
}
/**
* init_parisc_bus - Some preparation to be done before inventory
*/
void init_parisc_bus(void)
{
bus_register(&parisc_bus_type);
device_register(&root);
get_device(&root);
}
/**
* print_parisc_devices - Print out a list of devices found in this system
*/
void print_parisc_devices(void)
{
struct parisc_device *dev;
for_each_padev(dev) {
print_parisc_device(dev);
}
}

arch/parisc/kernel/entry.S (2426 lines): diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

arch/parisc/kernel/head.S

@@ -0,0 +1,386 @@
/* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Helge Deller
* Copyright 1999 SuSE GmbH (Philipp Rumpf)
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
* Copyright (C) 2001 Grant Grundler (Hewlett Packard)
* Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
*
* Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
*/
#include <linux/autoconf.h> /* for CONFIG_SMP */
#include <asm/offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
.level LEVEL
.data
.export boot_args
boot_args:
.word 0 /* arg0 */
.word 0 /* arg1 */
.word 0 /* arg2 */
.word 0 /* arg3 */
.text
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
#ifndef __LP64__
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!LP64*/
.export stext
.export _stext,data /* Kernel wants it this way! */
_stext:
stext:
.proc
.callinfo
/* Make sure sr4-sr7 are set to zero for the kernel address space */
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Clear BSS (shouldn't the boot loader do this?) */
.import __bss_start,data
.import __bss_stop,data
load32 PA(__bss_start),%r3
load32 PA(__bss_stop),%r4
$bss_loop:
cmpb,<<,n %r3,%r4,$bss_loop
stw,ma %r0,4(%r3)
/* Save away the arguments the boot loader passed in (32 bit args) */
load32 PA(boot_args),%r1
stw,ma %arg0,4(%r1)
stw,ma %arg1,4(%r1)
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
/* Initialize startup VM. Just map first 8/16 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
#ifdef __LP64__
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
/* 2-level page table, so pmd == pgd */
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Fill in pmd with enough pte directories */
load32 PA(pg0),%r1
SHRREG %r1,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
ldi ASM_PT_INITIAL,%r1
1:
stw %r3,0(%r4)
ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
#ifdef __LP64__
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Now initialize the PTEs themselves */
ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
load32 PA(pg0),%r1
$pgt_fill_loop:
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
ldo ASM_PAGE_SIZE(%r3),%r3
bb,>= %r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
nop
/* Load the return address...er...crash 'n burn */
copy %r0,%r2
/* And the RFI Target address too */
load32 start_kernel,%r11
/* And the initial task pointer */
load32 init_thread_union,%r6
mtctl %r6,%cr30
/* And the stack pointer too */
ldo THREAD_SZ_ALGN(%r6),%sp
/* And the interrupt stack */
load32 interrupt_stack,%r6
mtctl %r6,%cr31
#ifdef CONFIG_SMP
/* Set the smp rendezvous address into page zero.
** It would be safer to do this in init_smp_config() but
** it's just way easier to deal with here because
** of 64-bit function ptrs and the address is local to this file.
*/
load32 PA(smp_slave_stext),%r10
stw %r10,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
/* FALLTHROUGH */
.procend
/*
** Code Common to both Monarch and Slave processors.
** Entry:
**
** 1.1:
** %r11 must contain RFI target address.
** %r25/%r26 args to pass to target function
** %r2 in case rfi target decides it didn't like something
**
** 2.0w:
** %r3 PDCE_PROC address
** %r11 RFI target address
**
** Caller must init: SR4-7, %sp, %r10, %cr24/25,
*/
common_stext:
.proc
.callinfo
#else
/* Clear PDC entry point - we won't use it */
stw %r0,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
#ifdef __LP64__
tophys_r1 %sp
/* Save the rfi target address */
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
std %r11, TASK_PT_GR11(%r10)
/* Switch to wide mode: Superdome doesn't support narrow PDC
** calls.
*/
1: mfia %rp /* clear upper part of pcoq */
ldo 2f-1b(%rp),%rp
depdi 0,31,32,%rp
bv (%rp)
ssm PSW_SM_W,%r0
/* Set Wide mode as the "Default" (eg for traps)
** First trap occurs *right* after (or part of) rfi for slave CPUs.
** Someday, palo might not do this for the Monarch either.
*/
2:
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
load32 PA(stext_pdc_ret), %rp
bv (%r3)
copy %r0,%arg3
stext_pdc_ret:
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
ldd TASK_PT_GR11(%r10), %r11
tovirt_r1 %sp
#endif
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
mtsp %r0,%sr2
mtsp %r0,%sr3
/* Initialize Protection Registers */
mtctl %r0,%cr8
mtctl %r0,%cr9
mtctl %r0,%cr12
mtctl %r0,%cr13
/* Prepare to RFI! Man all the cannons! */
/* Initialize the global data pointer */
loadgp
/* Set up our interrupt table. HPMCs might not work after this!
*
* We need to install the correct iva for PA1.1 or PA2.0. The
* following short sequence of instructions can determine this
* (without being illegal on a PA1.1 machine).
*/
#ifndef __LP64__
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
mfctl,w %cr11,%r10
.level 1.1
comib,<>,n 0,%r10,$is_pa20
ldil L%PA(fault_vector_11),%r10
b $install_iva
ldo R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
.level LEVEL /* restore 1.1 || 2.0w */
#endif /*!LP64*/
load32 PA(fault_vector_20),%r10
$install_iva:
mtctl %r10,%cr14
#ifdef __LP64__
b aligned_rfi
nop
.align 256
aligned_rfi:
ssm 0,0
nop /* 1 */
nop /* 2 */
nop /* 3 */
nop /* 4 */
nop /* 5 */
nop /* 6 */
nop /* 7 */
nop /* 8 */
#endif
#ifdef __LP64__ /* move to psw.h? */
#define PSW_BITS PSW_Q+PSW_I+PSW_D+PSW_P+PSW_R
#else
#define PSW_BITS PSW_SM_Q
#endif
$rfi:
/* turn off troublesome PSW bits */
rsm PSW_BITS,%r0
/* kernel PSW:
* - no interruptions except HPMC and TOC (which are handled by PDC)
* - Q bit set (IODC / PDC interruptions)
* - big-endian
* - virtually mapped
*/
load32 KERNEL_PSW,%r10
mtctl %r10,%ipsw
/* Set the space pointers for the post-RFI world
** Clear the two-level IIA Space Queue, effectively setting
** Kernel space.
*/
mtctl %r0,%cr17 /* Clear IIASQ tail */
mtctl %r0,%cr17 /* Clear IIASQ head */
/* Load RFI target into PC queue */
mtctl %r11,%cr18 /* IIAOQ head */
ldo 4(%r11),%r11
mtctl %r11,%cr18 /* IIAOQ tail */
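/* The offset queue now holds head = target, tail = target + 4, so the
** rfi below resumes execution at the RFI target with sequential fetch.
*/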
/* Jump to hyperspace */
rfi
nop
.procend
#ifdef CONFIG_SMP
.import smp_init_current_idle_task,data
.import smp_callin,code
#ifndef __LP64__
smp_callin_rtn:
.proc
.callinfo
break 1,1 /* Break if returned from start_secondary */
nop
nop
.procend
#endif /*!LP64*/
/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
.proc
.callinfo
/*
** Initialize Space registers
*/
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
LDREG 0(%sp),%sp /* load task address */
tophys_r1 %sp
LDREG TASK_THREAD_INFO(%sp),%sp
mtctl %sp,%cr30 /* store in cr30 */
ldo THREAD_SZ_ALGN(%sp),%sp
/* point CPU to kernel page tables */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
#ifdef __LP64__
/* Setup PDCE_PROC entry */
copy %arg0,%r3
#else
/* Load RFI *return* address in case smp_callin bails */
load32 smp_callin_rtn,%r2
#endif
/* Load RFI target address. */
load32 smp_callin,%r11
/* ok...common code can handle the rest */
b common_stext
nop
.procend
#endif /* CONFIG_SMP */
#ifndef __LP64__
.data
.align 4
.export $global$,data
.type $global$,@object
.size $global$,4
$global$:
.word 0
#endif /*!LP64*/
arch/parisc/kernel/hpmc.S
@@ -0,0 +1,304 @@
/*
* HPMC (High Priority Machine Check) handler.
*
* Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
* Copyright (C) 2000 Hewlett-Packard (John Marvin)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* This HPMC handler retrieves the HPMC pim data, resets IO and
* returns to the default trap handler with code set to 1 (HPMC).
* The default trap handler calls handle interruption, which
* does a stack and register dump. This at least allows kernel
* developers to get back to C code in virtual mode, where they
* have the option to examine and print values from memory that
* would help in debugging an HPMC caused by a software bug.
*
* There is more to do here:
*
* 1) On MP systems we need to synchronize processors
* before calling pdc/iodc.
* 2) We should be checking the system state and not
* returning to the fault handler if things are really
* bad.
*
*/
.level 1.1
.data
#include <asm/assembly.h>
#include <asm/pdc.h>
/*
* stack for os_hpmc, the HPMC handler.
* buffer for IODC procedures (for the HPMC handler).
*
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
.align 4096
hpmc_stack:
.block 16384
#define HPMC_IODC_BUF_SIZE 0x8000
.align 4096
hpmc_iodc_buf:
.block HPMC_IODC_BUF_SIZE
.align 8
hpmc_raddr:
.block 128
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
.export hpmc_pim_data, data
.align 8
hpmc_pim_data:
.block HPMC_PIM_DATA_SIZE
.text
.export os_hpmc, code
.import intr_save, code
os_hpmc:
/*
* registers modified:
*
* Using callee saves registers without saving them. The
* original values are in the pim dump if we need them.
*
* r2 (rp) return pointer
* r3 address of PDCE_PROC
* r4 scratch
* r5 scratch
* r23 (arg3) procedure arg
* r24 (arg2) procedure arg
* r25 (arg1) procedure arg
* r26 (arg0) procedure arg
* r30 (sp) stack pointer
*
* registers read:
*
* r26 contains address of PDCE_PROC on entry
* r28 (ret0) return value from procedure
*/
copy arg0, %r3 /* save address of PDCE_PROC */
/*
* disable nested HPMCs
*
* Increment os_hpmc checksum to invalidate it.
* Do this before turning the PSW M bit off.
*/
mfctl %cr14, %r4
ldw 52(%r4),%r5
addi 1,%r5,%r5
stw %r5,52(%r4)
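/* %cr14 holds the IVA; the word at offset 52 from it is taken here to be
 * the os_hpmc checksum slot that firmware validates before vectoring an
 * HPMC, so corrupting it keeps a nested HPMC from re-entering this handler.
 */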
/* MP_FIXME: synchronize all processors. */
/* Setup stack pointer. */
load32 PA(hpmc_stack),sp
ldo 128(sp),sp /* leave room for arguments */
/*
* Most PDC routines require that the M bit be off.
* So turn on the Q bit and turn off the M bit.
*/
ldo 8(%r0),%r4 /* PSW Q on, PSW M off */
mtctl %r4,ipsw
mtctl %r0,pcsq
mtctl %r0,pcsq
load32 PA(os_hpmc_1),%r4
mtctl %r4,pcoq
ldo 4(%r4),%r4
mtctl %r4,pcoq
rfi
nop
os_hpmc_1:
/* Call PDC_PIM to get HPMC pim info */
/*
* Note that on some newer boxes, PDC_PIM must be called
* before PDC_IO if you want IO to be reset. PDC_PIM sets
* a flag that PDC_IO examines.
*/
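/*
 * PDC calling convention, used for every pdce_proc call below: arguments
 * 0-3 travel in arg0-arg3, the fifth and later arguments are stored into
 * the frame at -52(sp), -56(sp), ... and the call branches through %r3
 * with the return address in rp.
 */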
ldo PDC_PIM(%r0), arg0
ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */
load32 PA(hpmc_raddr),arg2
load32 PA(hpmc_pim_data),arg3
load32 HPMC_PIM_DATA_SIZE,%r4
stw %r4,-52(sp)
ldil L%PA(os_hpmc_2), rp
bv (r3) /* call pdce_proc */
ldo R%PA(os_hpmc_2)(rp), rp
os_hpmc_2:
comib,<> 0,ret0, os_hpmc_fail
/* Reset IO by calling the hversion dependent PDC_IO routine */
ldo PDC_IO(%r0),arg0
ldo 0(%r0),arg1 /* log IO errors */
ldo 0(%r0),arg2 /* reserved */
ldo 0(%r0),arg3 /* reserved */
stw %r0,-52(sp) /* reserved */
ldil L%PA(os_hpmc_3),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_3)(rp),rp
os_hpmc_3:
/* FIXME? Check for errors from PDC_IO (-1 might be OK) */
/*
* Initialize the IODC console device (HPA, SPA, path, etc.
* are stored on page 0).
*/
/*
* Load IODC into hpmc_iodc_buf by calling PDC_IODC.
* Note that PDC_IODC handles flushing the appropriate
* data and instruction cache lines.
*/
ldo PDC_IODC(%r0),arg0
ldo PDC_IODC_READ(%r0),arg1
load32 PA(hpmc_raddr),arg2
ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
ldo PDC_IODC_RI_INIT(%r0),%r4
stw %r4,-52(sp)
load32 PA(hpmc_iodc_buf),%r4
stw %r4,-56(sp)
load32 HPMC_IODC_BUF_SIZE,%r4
stw %r4,-60(sp)
ldil L%PA(os_hpmc_4),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_4)(rp),rp
os_hpmc_4:
comib,<> 0,ret0,os_hpmc_fail
/* Call the entry init (just loaded by PDC_IODC) */
ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */
ldo ENTRY_INIT_MOD_DEV(%r0), arg1
ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */
depi 0,31,11,arg2 /* clear bits 21-31 */
ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
load32 PA(hpmc_raddr),%r4
stw %r4, -52(sp)
stw %r0, -56(sp) /* HV */
stw %r0, -60(sp) /* HV */
stw %r0, -64(sp) /* HV */
stw %r0, -68(sp) /* lang, must be zero */
load32 PA(hpmc_iodc_buf),%r5
ldil L%PA(os_hpmc_5),rp
bv (%r5)
ldo R%PA(os_hpmc_5)(rp),rp
os_hpmc_5:
comib,<> 0,ret0,os_hpmc_fail
/* Prepare to call intr_save */
/*
* Load kernel page directory (load into user also, since
* we don't intend to ever return to user land anyway)
*/
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
/* Clear sr4-sr7 */
mtsp %r0, %sr4
mtsp %r0, %sr5
mtsp %r0, %sr6
mtsp %r0, %sr7
tovirt_r1 %r30 /* make sp virtual */
rsm 8,%r0 /* Clear Q bit */
ldi 1,%r8 /* Set trap code to "1" for HPMC */
load32 PA(intr_save),%r1
be 0(%sr7,%r1)
nop
os_hpmc_fail:
/*
* Reset the system
*
* Some systems may lockup from a broadcast reset, so try the
* hversion PDC_BROADCAST_RESET() first.
* MP_FIXME: reset all processors if more than one central bus.
*/
/* PDC_BROADCAST_RESET() */
ldo PDC_BROADCAST_RESET(%r0),arg0
ldo 0(%r0),arg1 /* do reset */
ldil L%PA(os_hpmc_6),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_6)(rp),rp
os_hpmc_6:
/*
* possible return values:
* -1 non-existent procedure
* -2 non-existent option
* -16 unaligned stack
*
* If call returned, do a broadcast reset.
*/
ldil L%0xfffc0000,%r4 /* IO_BROADCAST */
ldo 5(%r0),%r5
stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */
b .
nop
/* this label used to compute os_hpmc checksum */
.export os_hpmc_end, code
os_hpmc_end:
nop
arch/parisc/kernel/init_task.c
@@ -0,0 +1,76 @@
/*
* Static declaration of "init" task data structure.
*
* Copyright (C) 2000 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org>
* Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
/*
* Initial task structure.
*
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
unsigned char interrupt_stack[ISTACK_SIZE] __attribute__ ((section("init_istack"), aligned(4096)));
union thread_union init_thread_union
__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
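/* head.S loads the address of this union into %cr30 at boot (and per CPU
 * in smp_slave_stext) so the kernel can always find the current
 * thread_info; the boot %sp is set THREAD_SZ_ALGN bytes above it
 * (PA-RISC stacks grow upward).
 */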
#ifdef __LP64__
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
EXPORT_SYMBOL(init_task);
__asm__(".data");
struct task_struct init_task = INIT_TASK(init_task);
arch/parisc/kernel/inventory.c
@@ -0,0 +1,612 @@
/*
* inventory.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
* Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
*
* These are the routines to discover what hardware exists in this box.
* This task is complicated by there being 3 different ways of
* performing an inventory, depending largely on the age of the box.
* The recommended way to do this is to check to see whether the machine
* is a `Snake' first, then try System Map, then try PAT. We try System
* Map before checking for a Snake -- this probably doesn't cause any
* problems, but...
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
/*
** Debug options
** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT
int pdc_type = PDC_TYPE_ILLEGAL;
void __init setup_pdc(void)
{
long status;
unsigned int bus_id;
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
struct pdc_model model;
#ifdef __LP64__
struct pdc_pat_cell_num cell_info;
#endif
/* Determine the pdc "type" used on this machine */
printk(KERN_INFO "Determining PDC firmware type: ");
status = pdc_system_map_find_mods(&module_result, &module_path, 0);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_SYSTEM_MAP;
printk("System Map.\n");
return;
}
/*
* If the machine doesn't support PDC_SYSTEM_MAP then either it
* is a pdc pat box, or it is an older box. All 64 bit capable
* machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
*/
/*
* TODO: We should test for 64 bit capability and give a
* clearer message.
*/
#ifdef __LP64__
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_PAT;
printk("64 bit PAT.\n");
return;
}
#endif
/* Check the CPU's bus ID. There's probably a better test. */
status = pdc_model_info(&model);
bus_id = (model.hversion >> (4 + 7)) & 0x1f;
switch (bus_id) {
case 0x4: /* 720, 730, 750, 735, 755 */
case 0x6: /* 705, 710 */
case 0x7: /* 715, 725 */
case 0x8: /* 745, 747, 742 */
case 0xA: /* 712 and similar */
case 0xC: /* 715/64, at least */
pdc_type = PDC_TYPE_SNAKE;
printk("Snake.\n");
return;
default: /* Everything else */
printk("Unsupported.\n");
panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
}
}
#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
unsigned long pages4k)
{
/* Rather than aligning and potentially throwing away
* memory, we'll assume that any ranges are already
* nicely aligned with any reasonable page size, and
* panic if they are not (it's more likely that the
* pdc info is bad in this case).
*/
if ( ((start & (PAGE_SIZE - 1)) != 0)
|| ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) {
panic("Memory range doesn't align with page size!\n");
}
pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}
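/* Worked example: with 4 KB kernel pages (PAGE_SHIFT == 12) the shift is 0
 * and the PDC page counts are used as-is; a hypothetical 16 KB PAGE_SIZE
 * (PAGE_SHIFT == 14) would give a shift of 2, so 8192 4k-pages reported by
 * PDC become 2048 kernel pages covering the same 32 MB of RAM.
 */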
static void __init pagezero_memconfig(void)
{
unsigned long npages;
/* Use the 32 bit information from page zero to create a single
* entry in the pmem_ranges[] table.
*
* We currently don't support machines with contiguous memory
* >= 4 GB, which report that memory using 64-bit-only fields
* on page zero. It's not worth doing until it can be tested,
* and it is not clear we can support those machines for other
* reasons.
*
* If that support is done in the future, this is where it
* should be done.
*/
npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
set_pmem_entry(pmem_ranges,0UL,npages);
npmem_ranges = 1;
}
#ifdef __LP64__
/* All of the PDC PAT specific code is 64-bit only */
/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs. This view will cause an invalid
** argument error for all other cell module types.
**
*/
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
unsigned long bytecnt;
unsigned long temp; /* 64-bit scratch value */
long status; /* PDC return value status */
struct parisc_device *dev;
/* return cell module (PA or Processor view) */
status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
PA_VIEW, &pa_pdc_cell);
if (status != PDC_OK) {
/* no more cell modules or error */
return status;
}
temp = pa_pdc_cell.cba;
dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
if (!dev) {
return PDC_NE_MOD;
}
/* alloc_pa_dev sets dev->hpa */
/*
** save parameters in the parisc_device
** (The idea being the device driver will call pdc_pat_cell_module()
** and store the results in its own data structure.)
*/
dev->pcell_loc = pcell_loc;
dev->mod_index = mod_index;
/* save generic info returned from the call */
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell.mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell.mod_location;
register_parisc_device(dev); /* advertise device */
#ifdef DEBUG_PAT
pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
/* dump what we see so far... */
switch (PAT_GET_ENTITY(dev->mod_info)) {
unsigned long i;
case PAT_ENTITY_PROC:
printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
pa_pdc_cell.mod[0]);
break;
case PAT_ENTITY_MEM:
printk(KERN_DEBUG
"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
pa_pdc_cell.mod[2]);
break;
case PAT_ENTITY_CA:
printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
break;
case PAT_ENTITY_PBC:
printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
goto print_ranges;
case PAT_ENTITY_SBA:
printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
goto print_ranges;
case PAT_ENTITY_LBA:
printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
print_ranges:
pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
IO_VIEW, &io_pdc_cell);
printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
printk(KERN_DEBUG
" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
i, pa_pdc_cell.mod[2 + i * 3], /* type */
pa_pdc_cell.mod[3 + i * 3], /* start */
pa_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
printk(KERN_DEBUG
" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
i, io_pdc_cell.mod[2 + i * 3], /* type */
io_pdc_cell.mod[3 + i * 3], /* start */
io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
}
printk(KERN_DEBUG "\n");
break;
}
#endif /* DEBUG_PAT */
return PDC_OK;
}
/* pat pdc can return information about a variety of different
* types of memory (e.g. firmware, I/O, etc.) but we only care about
* the usable physical ram right now. Since the firmware specific
* information is allocated on the stack, we'll be generous, in
* case there is a lot of other information we don't care about.
*/
#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
static void __init pat_memconfig(void)
{
unsigned long actual_len;
struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
physmem_range_t *pmem_ptr;
long status;
int entries;
unsigned long length;
int i;
length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
if ((status != PDC_OK)
|| ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
/* The above pdc call shouldn't fail, but, just in
* case, just use the PAGE0 info.
*/
printk("\n\n\n");
printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
"All memory may not be used!\n\n\n");
pagezero_memconfig();
return;
}
entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
if (entries > PAT_MAX_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory may not be used!\n");
}
/* Copy information into the firmware independent pmem_ranges
* array, skipping types we don't care about. Notice we said
* "may" above. We'll use all the entries that were returned.
*/
npmem_ranges = 0;
mtbl_ptr = mem_table;
pmem_ptr = pmem_ranges; /* Global firmware independent table */
for (i = 0; i < entries; i++,mtbl_ptr++) {
if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
|| (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
|| (mtbl_ptr->pages == 0)
|| ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
continue;
}
if (npmem_ranges == MAX_PHYSMEM_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory will not be used!\n");
break;
}
set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
npmem_ranges++;
}
}
static int __init pat_inventory(void)
{
int status;
ulong mod_index = 0;
struct pdc_pat_cell_num cell_info;
/*
** Note: Prelude (and its successors: Lclass, A400/500) only
** implement PDC_PAT_CELL sub-options 0 and 2.
*/
status = pdc_pat_cell_get_number(&cell_info);
if (status != PDC_OK) {
return 0;
}
#ifdef DEBUG_PAT
printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
cell_info.cell_loc);
#endif
while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
mod_index++;
}
return mod_index;
}
/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
struct pdc_memory_table_raddr r_addr;
struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
struct pdc_memory_table *mtbl_ptr;
physmem_range_t *pmem_ptr;
long status;
int entries;
int i;
status = pdc_mem_mem_table(&r_addr,mem_table,
(unsigned long)MAX_PHYSMEM_RANGES);
if (status != PDC_OK) {
/* The above pdc call only works on boxes with sprockets
* firmware (newer B,C,J class). Other non PAT PDC machines
* do support more than 3.75 GB of memory, but we don't
* support them yet.
*/
pagezero_memconfig();
return;
}
if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory will not be used!\n");
}
entries = (int)r_addr.entries_returned;
npmem_ranges = 0;
mtbl_ptr = mem_table;
pmem_ptr = pmem_ranges; /* Global firmware independent table */
for (i = 0; i < entries; i++,mtbl_ptr++) {
set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
npmem_ranges++;
}
}
#else /* !__LP64__ */
#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()
#endif /* !__LP64__ */
#ifndef CONFIG_PA20
/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
struct pdc_module_path *module_path)
{
struct parisc_device *dev;
int status = pdc_mem_map_hpa(r_addr, module_path);
if (status != PDC_OK)
return NULL;
dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
if (dev == NULL)
return NULL;
register_parisc_device(dev);
return dev;
}
/**
* snake_inventory
*
* Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
* To use it, we initialise the mod_path.bc to 0xff and try all values of
* mod to get the HPA for the top-level devices. Bus adapters may have
* sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
* module, then trying all possible functions.
*/
static void __init snake_inventory(void)
{
int mod;
for (mod = 0; mod < 16; mod++) {
struct parisc_device *dev;
struct pdc_module_path module_path;
struct pdc_memory_map r_addr;
unsigned int func;
memset(module_path.path.bc, 0xff, 6);
module_path.path.mod = mod;
dev = legacy_create_device(&r_addr, &module_path);
if ((!dev) || (dev->id.hw_type != HPHW_BA))
continue;
memset(module_path.path.bc, 0xff, 4);
module_path.path.bc[4] = mod;
for (func = 0; func < 16; func++) {
module_path.path.bc[5] = 0;
module_path.path.mod = func;
legacy_create_device(&r_addr, &module_path);
}
}
}
#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif /* CONFIG_PA20 */
/* Common 32/64 bit based code goes here */
/**
* add_system_map_addresses - Add additional addresses to the parisc device.
* @dev: The parisc device.
* @num_addrs: The number of addresses to add.
* @module_instance: The system_map module instance.
*
* This function adds any additional addresses reported by the system_map
* firmware to the parisc device.
*/
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
int module_instance)
{
int i;
long status;
struct pdc_system_map_addr_info addr_result;
dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
__FILE__, __FUNCTION__);
return;
}
for(i = 1; i <= num_addrs; ++i) {
status = pdc_system_map_find_addrs(&addr_result,
module_instance, i);
if(PDC_OK == status) {
dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
dev->num_addrs++;
} else {
printk(KERN_WARNING
"Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
status, i);
}
}
}
/**
* system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
*
* This function attempts to retrieve and register all the devices firmware
* knows about via the SYSTEM_MAP PDC call.
*/
static void __init system_map_inventory(void)
{
int i;
long status = PDC_OK;
for (i = 0; i < 256; i++) {
struct parisc_device *dev;
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
status = pdc_system_map_find_mods(&module_result,
&module_path, i);
if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
break;
if (status != PDC_OK)
continue;
dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
if (!dev)
continue;
register_parisc_device(dev);
/* if available, get the additional addresses for a module */
if (!module_result.add_addrs)
continue;
add_system_map_addresses(dev, module_result.add_addrs, i);
}
walk_central_bus();
return;
}
void __init do_memory_inventory(void)
{
switch (pdc_type) {
case PDC_TYPE_PAT:
pat_memconfig();
break;
case PDC_TYPE_SYSTEM_MAP:
sprockets_memconfig();
break;
case PDC_TYPE_SNAKE:
pagezero_memconfig();
return;
default:
panic("Unknown PDC type!\n");
}
if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
printk(KERN_WARNING "Bad memory configuration returned!\n");
printk(KERN_WARNING "Some memory may not be used!\n");
pagezero_memconfig();
}
}
void __init do_device_inventory(void)
{
printk(KERN_INFO "Searching for devices...\n");
init_parisc_bus();
switch (pdc_type) {
case PDC_TYPE_PAT:
pat_inventory();
break;
case PDC_TYPE_SYSTEM_MAP:
system_map_inventory();
break;
case PDC_TYPE_SNAKE:
snake_inventory();
break;
default:
panic("Unknown PDC type!\n");
}
printk(KERN_INFO "Found devices:\n");
print_parisc_devices();
}
arch/parisc/kernel/ioctl32.c
@@ -0,0 +1,625 @@
/* $Id: ioctl32.c,v 1.5 2002/10/18 00:21:43 varenet Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
*/
#include <linux/syscalls.h>
#define INCLUDES
#include "compat_ioctl.c"
#include <asm/perf.h>
#include <asm/ioctls.h>
#define CODE
#include "compat_ioctl.c"
/* Use this to get at 32-bit user passed pointers.
See sys_sparc32.c for description about these. */
#define A(__x) ((unsigned long)(__x))
/* The same for use with copy_from_user() and copy_to_user(). */
#define B(__x) ((void *)(unsigned long)(__x))
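/*
 * Illustrative sketch (not part of the original file, hence kept under
 * #if 0): the shape shared by every translation handler below, shown for
 * a hypothetical EXAMPLE_IOCTL whose 32-bit layout carries one user
 * pointer.  The 32-bit struct is read with get_user(), the u32 pointer is
 * widened via A(), and sys_ioctl() is invoked under set_fs(KERNEL_DS) so
 * it accepts the kernel-resident copy of the argument.
 */
#if 0	/* example only */
typedef struct example32 {
	int	len;
	u32	buf;		/* 32-bit user pointer */
} example32_t;

static int example32_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	example32_t *uarg = (example32_t *)arg;	/* 32-bit user layout */
	struct { int len; char *buf; } karg;	/* native 64-bit layout */
	mm_segment_t old_fs;
	u32 tmp;
	int ret;

	if (get_user(karg.len, &uarg->len) || get_user(tmp, &uarg->buf))
		return -EFAULT;
	karg.buf = (char *) A(tmp);		/* widen the pointer */

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, EXAMPLE_IOCTL, (unsigned long)&karg);
	set_fs(old_fs);
	return ret;
}
#endif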
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* This really belongs in include/linux/drm.h -DaveM */
#include "../../../drivers/char/drm/drm.h"
typedef struct drm32_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel;/* Patch level */
int name_len; /* Length of name buffer */
u32 name; /* Name of driver */
int date_len; /* Length of date buffer */
u32 date; /* User-space buffer to hold date */
int desc_len; /* Length of desc buffer */
u32 desc; /* User-space buffer to hold desc */
} drm32_version_t;
#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_version_t *uversion = (drm32_version_t *)arg;
char *name_ptr, *date_ptr, *desc_ptr;
u32 tmp1, tmp2, tmp3;
drm_version_t kversion;
mm_segment_t old_fs;
int ret;
memset(&kversion, 0, sizeof(kversion));
if (get_user(kversion.name_len, &uversion->name_len) ||
get_user(kversion.date_len, &uversion->date_len) ||
get_user(kversion.desc_len, &uversion->desc_len) ||
get_user(tmp1, &uversion->name) ||
get_user(tmp2, &uversion->date) ||
get_user(tmp3, &uversion->desc))
return -EFAULT;
name_ptr = (char *) A(tmp1);
date_ptr = (char *) A(tmp2);
desc_ptr = (char *) A(tmp3);
ret = -ENOMEM;
if (kversion.name_len && name_ptr) {
kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
if (!kversion.name)
goto out;
}
if (kversion.date_len && date_ptr) {
kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
if (!kversion.date)
goto out;
}
if (kversion.desc_len && desc_ptr) {
kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
if (!kversion.desc)
goto out;
}
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
set_fs(old_fs);
if (!ret) {
if ((kversion.name &&
copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
(kversion.date &&
copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
(kversion.desc &&
copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
ret = -EFAULT;
if (put_user(kversion.version_major, &uversion->version_major) ||
put_user(kversion.version_minor, &uversion->version_minor) ||
put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
put_user(kversion.name_len, &uversion->name_len) ||
put_user(kversion.date_len, &uversion->date_len) ||
put_user(kversion.desc_len, &uversion->desc_len))
ret = -EFAULT;
}
out:
if (kversion.name)
kfree(kversion.name);
if (kversion.date)
kfree(kversion.date);
if (kversion.desc)
kfree(kversion.desc);
return ret;
}
typedef struct drm32_unique {
int unique_len; /* Length of unique */
u32 unique; /* Unique name for driver instantiation */
} drm32_unique_t;
#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_unique_t *uarg = (drm32_unique_t *)arg;
drm_unique_t karg;
mm_segment_t old_fs;
char *uptr;
u32 tmp;
int ret;
if (get_user(karg.unique_len, &uarg->unique_len))
return -EFAULT;
karg.unique = NULL;
if (get_user(tmp, &uarg->unique))
return -EFAULT;
uptr = (char *) A(tmp);
if (uptr) {
karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
if (!karg.unique)
return -ENOMEM;
if (cmd == DRM32_IOCTL_SET_UNIQUE &&
copy_from_user(karg.unique, uptr, karg.unique_len)) {
kfree(karg.unique);
return -EFAULT;
}
}
old_fs = get_fs();
set_fs(KERNEL_DS);
if (cmd == DRM32_IOCTL_GET_UNIQUE)
ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
else
ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
set_fs(old_fs);
if (!ret) {
if (cmd == DRM32_IOCTL_GET_UNIQUE &&
uptr != NULL &&
copy_to_user(uptr, karg.unique, karg.unique_len))
ret = -EFAULT;
if (put_user(karg.unique_len, &uarg->unique_len))
ret = -EFAULT;
}
if (karg.unique != NULL)
kfree(karg.unique);
return ret;
}
typedef struct drm32_map {
u32 offset; /* Requested physical address (0 for SAREA)*/
u32 size; /* Requested physical size (bytes) */
drm_map_type_t type; /* Type of memory to map */
drm_map_flags_t flags; /* Flags */
u32 handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* MTRR slot used */
/* Private data */
} drm32_map_t;
#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_map_t *uarg = (drm32_map_t *) arg;
drm_map_t karg;
mm_segment_t old_fs;
u32 tmp;
int ret;
ret = get_user(karg.offset, &uarg->offset);
ret |= get_user(karg.size, &uarg->size);
ret |= get_user(karg.type, &uarg->type);
ret |= get_user(karg.flags, &uarg->flags);
ret |= get_user(tmp, &uarg->handle);
ret |= get_user(karg.mtrr, &uarg->mtrr);
if (ret)
return -EFAULT;
karg.handle = (void *) A(tmp);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
set_fs(old_fs);
if (!ret) {
ret = put_user(karg.offset, &uarg->offset);
ret |= put_user(karg.size, &uarg->size);
ret |= put_user(karg.type, &uarg->type);
ret |= put_user(karg.flags, &uarg->flags);
tmp = (u32) (long)karg.handle;
ret |= put_user(tmp, &uarg->handle);
ret |= put_user(karg.mtrr, &uarg->mtrr);
if (ret)
ret = -EFAULT;
}
return ret;
}
typedef struct drm32_buf_info {
int count; /* Entries in list */
u32 list; /* (drm_buf_desc_t *) */
} drm32_buf_info_t;
#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
drm_buf_desc_t *ulist;
drm_buf_info_t karg;
mm_segment_t old_fs;
int orig_count, ret;
u32 tmp;
if (get_user(karg.count, &uarg->count) ||
get_user(tmp, &uarg->list))
return -EFAULT;
ulist = (drm_buf_desc_t *) A(tmp);
orig_count = karg.count;
karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
if (!karg.list)
return -ENOMEM;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
set_fs(old_fs);
if (!ret) {
if (karg.count <= orig_count &&
(copy_to_user(ulist, karg.list,
karg.count * sizeof(drm_buf_desc_t))))
ret = -EFAULT;
if (put_user(karg.count, &uarg->count))
ret = -EFAULT;
}
kfree(karg.list);
return ret;
}
typedef struct drm32_buf_free {
int count;
u32 list; /* (int *) */
} drm32_buf_free_t;
#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
drm_buf_free_t karg;
mm_segment_t old_fs;
int *ulist;
int ret;
u32 tmp;
if (get_user(karg.count, &uarg->count) ||
get_user(tmp, &uarg->list))
return -EFAULT;
ulist = (int *) A(tmp);
karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
if (!karg.list)
return -ENOMEM;
ret = -EFAULT;
if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
goto out;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
set_fs(old_fs);
out:
kfree(karg.list);
return ret;
}
typedef struct drm32_buf_pub {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int used; /* Amount of buffer in use (for DMA) */
u32 address; /* Address of buffer (void *) */
} drm32_buf_pub_t;
typedef struct drm32_buf_map {
int count; /* Length of buflist */
u32 virtual; /* Mmapped area in user-virtual (void *) */
u32 list; /* Buffer information (drm_buf_pub_t *) */
} drm32_buf_map_t;
#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
drm32_buf_pub_t *ulist;
drm_buf_map_t karg;
mm_segment_t old_fs;
int orig_count, ret, i;
u32 tmp1, tmp2;
if (get_user(karg.count, &uarg->count) ||
get_user(tmp1, &uarg->virtual) ||
get_user(tmp2, &uarg->list))
return -EFAULT;
karg.virtual = (void *) A(tmp1);
ulist = (drm32_buf_pub_t *) A(tmp2);
orig_count = karg.count;
karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
if (!karg.list)
return -ENOMEM;
ret = -EFAULT;
for (i = 0; i < karg.count; i++) {
if (get_user(karg.list[i].idx, &ulist[i].idx) ||
get_user(karg.list[i].total, &ulist[i].total) ||
get_user(karg.list[i].used, &ulist[i].used) ||
get_user(tmp1, &ulist[i].address))
goto out;
karg.list[i].address = (void *) A(tmp1);
}
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
set_fs(old_fs);
if (!ret) {
for (i = 0; i < orig_count; i++) {
tmp1 = (u32) (long) karg.list[i].address;
if (put_user(karg.list[i].idx, &ulist[i].idx) ||
put_user(karg.list[i].total, &ulist[i].total) ||
put_user(karg.list[i].used, &ulist[i].used) ||
put_user(tmp1, &ulist[i].address)) {
ret = -EFAULT;
goto out;
}
}
if (put_user(karg.count, &uarg->count))
ret = -EFAULT;
}
out:
kfree(karg.list);
return ret;
}
typedef struct drm32_dma {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int context; /* Context handle */
int send_count; /* Number of buffers to send */
u32 send_indices; /* List of handles to buffers (int *) */
u32 send_sizes; /* Lengths of data to send (int *) */
drm_dma_flags_t flags; /* Flags */
int request_count; /* Number of buffers requested */
int request_size; /* Desired size for buffers */
u32 request_indices; /* Buffer information (int *) */
u32 request_sizes; /* (int *) */
int granted_count; /* Number of buffers granted */
} drm32_dma_t;
#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
/* RED PEN The DRM layer blindly dereferences the send/request
* index/size arrays even though they are userland
* pointers. -DaveM
*/
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_dma_t *uarg = (drm32_dma_t *) arg;
int *u_si, *u_ss, *u_ri, *u_rs;
drm_dma_t karg;
mm_segment_t old_fs;
int ret;
u32 tmp1, tmp2, tmp3, tmp4;
karg.send_indices = karg.send_sizes = NULL;
karg.request_indices = karg.request_sizes = NULL;
if (get_user(karg.context, &uarg->context) ||
get_user(karg.send_count, &uarg->send_count) ||
get_user(tmp1, &uarg->send_indices) ||
get_user(tmp2, &uarg->send_sizes) ||
get_user(karg.flags, &uarg->flags) ||
get_user(karg.request_count, &uarg->request_count) ||
get_user(karg.request_size, &uarg->request_size) ||
get_user(tmp3, &uarg->request_indices) ||
get_user(tmp4, &uarg->request_sizes) ||
get_user(karg.granted_count, &uarg->granted_count))
return -EFAULT;
u_si = (int *) A(tmp1);
u_ss = (int *) A(tmp2);
u_ri = (int *) A(tmp3);
u_rs = (int *) A(tmp4);
if (karg.send_count) {
karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
ret = -ENOMEM;
if (!karg.send_indices || !karg.send_sizes)
goto out;
ret = -EFAULT;
if (copy_from_user(karg.send_indices, u_si,
(karg.send_count * sizeof(int))) ||
copy_from_user(karg.send_sizes, u_ss,
(karg.send_count * sizeof(int))))
goto out;
}
if (karg.request_count) {
karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
ret = -ENOMEM;
if (!karg.request_indices || !karg.request_sizes)
goto out;
ret = -EFAULT;
if (copy_from_user(karg.request_indices, u_ri,
(karg.request_count * sizeof(int))) ||
copy_from_user(karg.request_sizes, u_rs,
(karg.request_count * sizeof(int))))
goto out;
}
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
set_fs(old_fs);
if (!ret) {
if (put_user(karg.context, &uarg->context) ||
put_user(karg.send_count, &uarg->send_count) ||
put_user(karg.flags, &uarg->flags) ||
put_user(karg.request_count, &uarg->request_count) ||
put_user(karg.request_size, &uarg->request_size) ||
put_user(karg.granted_count, &uarg->granted_count))
ret = -EFAULT;
if (karg.send_count) {
if (copy_to_user(u_si, karg.send_indices,
(karg.send_count * sizeof(int))) ||
copy_to_user(u_ss, karg.send_sizes,
(karg.send_count * sizeof(int))))
ret = -EFAULT;
}
if (karg.request_count) {
if (copy_to_user(u_ri, karg.request_indices,
(karg.request_count * sizeof(int))) ||
copy_to_user(u_rs, karg.request_sizes,
(karg.request_count * sizeof(int))))
ret = -EFAULT;
}
}
out:
if (karg.send_indices)
kfree(karg.send_indices);
if (karg.send_sizes)
kfree(karg.send_sizes);
if (karg.request_indices)
kfree(karg.request_indices);
if (karg.request_sizes)
kfree(karg.request_sizes);
return ret;
}
typedef struct drm32_ctx_res {
int count;
u32 contexts; /* (drm_ctx_t *) */
} drm32_ctx_res_t;
#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
drm_ctx_t *ulist;
drm_ctx_res_t karg;
mm_segment_t old_fs;
int orig_count, ret;
u32 tmp;
karg.contexts = NULL;
if (get_user(karg.count, &uarg->count) ||
get_user(tmp, &uarg->contexts))
return -EFAULT;
ulist = (drm_ctx_t *) A(tmp);
orig_count = karg.count;
if (karg.count && ulist) {
karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
if (!karg.contexts)
return -ENOMEM;
if (copy_from_user(karg.contexts, ulist,
(karg.count * sizeof(drm_ctx_t)))) {
kfree(karg.contexts);
return -EFAULT;
}
}
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
set_fs(old_fs);
if (!ret) {
if (orig_count) {
if (copy_to_user(ulist, karg.contexts,
(orig_count * sizeof(drm_ctx_t))))
ret = -EFAULT;
}
if (put_user(karg.count, &uarg->count))
ret = -EFAULT;
}
if (karg.contexts)
kfree(karg.contexts);
return ret;
}
#endif
#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)
#define IOCTL_TABLE_START struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END };
IOCTL_TABLE_START
#include <linux/compat_ioctl.h>
#define DECLARES
#include "compat_ioctl.c"
/* Might be moved to compat_ioctl.h with some ifdefs... */
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
COMPATIBLE_IOCTL(TIOCSLTC)
/* PA-specific ioctls */
COMPATIBLE_IOCTL(PA_PERF_ON)
COMPATIBLE_IOCTL(PA_PERF_OFF)
COMPATIBLE_IOCTL(PA_PERF_VERSION)
/* And these ioctls need translation */
HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
HANDLE_IOCTL(SIOCGPPPVER, dev_ifsioc)
#if defined(CONFIG_GEN_RTC)
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
COMPATIBLE_IOCTL(RTC_UIE_ON)
COMPATIBLE_IOCTL(RTC_UIE_OFF)
COMPATIBLE_IOCTL(RTC_PIE_ON)
COMPATIBLE_IOCTL(RTC_PIE_OFF)
COMPATIBLE_IOCTL(RTC_WIE_ON)
COMPATIBLE_IOCTL(RTC_WIE_OFF)
COMPATIBLE_IOCTL(RTC_ALM_SET) /* struct rtc_time only has ints */
COMPATIBLE_IOCTL(RTC_ALM_READ) /* struct rtc_time only has ints */
COMPATIBLE_IOCTL(RTC_RD_TIME) /* struct rtc_time only has ints */
COMPATIBLE_IOCTL(RTC_SET_TIME) /* struct rtc_time only has ints */
HANDLE_IOCTL(RTC_IRQP_READ, w_long)
COMPATIBLE_IOCTL(RTC_IRQP_SET)
HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
COMPATIBLE_IOCTL(RTC_EPOCH_SET)
#endif
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
#endif /* DRM */
IOCTL_TABLE_END
int ioctl_table_size = ARRAY_SIZE(ioctl_start);
arch/parisc/kernel/irq.c
@@ -0,0 +1,343 @@
/*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
* Copyright (C) 1999-2000 Grant Grundler
* Copyright (c) 2005 Matthew Wilcox
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#undef PARISC_IRQ_CR16_COUNTS
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (i.e. bit 0 is the MSB)
*/
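/* For example, EIEM_MASK(CPU_IRQ_MAX) is 1UL (the LSB), while the
** lowest-numbered CPU interrupt lands in the MSB - consistent with the
** big-endian bit numbering noted above.
*/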
static volatile unsigned long cpu_eiem = 0;
static void cpu_set_eiem(void *info)
{
set_eiem((unsigned long) info);
}
static inline void cpu_disable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem &= ~eirr_bit;
on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
static void cpu_enable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
static unsigned int cpu_startup_irq(unsigned int irq)
{
cpu_enable_irq(irq);
return 0;
}
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }
static struct hw_interrupt_type cpu_interrupt_type = {
.typename = "CPU",
.startup = cpu_startup_irq,
.shutdown = cpu_disable_irq,
.enable = cpu_enable_irq,
.disable = cpu_disable_irq,
.ack = no_ack_irq,
.end = no_end_irq,
// .set_affinity = cpu_set_affinity_irq,
};
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(j)
seq_printf(p, " CPU%d", j);
#ifdef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
struct irqaction *action;
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
seq_printf(p, " %14s", irq_desc[i].handler->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
while ((action = action->next))
seq_printf(p, ", %s", action->name);
#else
for ( ;action; action = action->next) {
unsigned int k, avg, min, max;
min = max = action->cr16_hist[0];
for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
int hist = action->cr16_hist[k];
if (hist) {
avg += hist;
} else
break;
if (hist > max) max = hist;
if (hist < min) min = hist;
}
if (k)
avg /= k;
seq_printf(p, " %s[%d/%d/%d]", action->name,
min,avg,max);
}
#endif
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
{
if (irq_desc[irq].action)
return -EBUSY;
if (irq_desc[irq].handler != &cpu_interrupt_type)
return -EBUSY;
if (type) {
irq_desc[irq].handler = type;
irq_desc[irq].handler_data = data;
cpu_interrupt_type.enable(irq);
}
return 0;
}
int txn_claim_irq(int irq)
{
return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}
/*
* The bits_wide parameter accommodates the limitations of the HW/SW which
* use these bits:
* Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
* V-class (EPIC): 6 bits
* N/L/A-class (iosapic): 8 bits
* PCI 2.2 MSI: 16 bits
* Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
*
* On the service provider side:
* o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
* o PA 2.0 wide mode 6-bits (per processor)
* o IA64 8-bits (0-256 total)
*
* So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
* by the processor...and the N/L-class I/O subsystem supports more bits than
* PA2.0 has. The first case is the problem.
*/
int txn_alloc_irq(unsigned int bits_wide)
{
int irq;
/* never return irq 0 because that's the interval timer */
for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
if (cpu_claim_irq(irq, NULL, NULL) < 0)
continue;
if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
continue;
return irq;
}
/* unlikely, but be prepared */
return -1;
}
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
!cpu_online(next_cpu)))
next_cpu++;
if (next_cpu >= NR_CPUS)
next_cpu = 0; /* nothing else, assign monarch */
return cpu_data[next_cpu].txn_addr;
}
unsigned int txn_alloc_data(unsigned int virt_irq)
{
return virt_irq - CPU_IRQ_BASE;
}
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
unsigned long eirr_val;
irq_enter();
/*
* Only allow interrupt processing to be interrupted by the
* timer tick
*/
set_eiem(EIEM_MASK(TIMER_IRQ));
/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
* 2) We loop here on EIRR contents in order to avoid
* nested interrupts or having to take another interrupt
* when we could have just handled it right away.
*/
for (;;) {
unsigned long bit = (1UL << (BITS_PER_LONG - 1));
unsigned int irq;
eirr_val = mfctl(23) & cpu_eiem;
if (!eirr_val)
break;
if (eirr_val & EIEM_MASK(TIMER_IRQ))
set_eiem(0);
mtctl(eirr_val, 23); /* reset bits we are going to process */
/* Work our way from MSb to LSb...same order we alloc EIRs */
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
if (!(bit & eirr_val))
continue;
/* clear bit in mask - can exit loop sooner */
eirr_val &= ~bit;
__do_IRQ(irq, regs);
}
}
set_eiem(cpu_eiem);
irq_exit();
}
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
};
#endif
static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
irq_desc[i].handler = &cpu_interrupt_type;
}
irq_desc[TIMER_IRQ].action = &timer_action;
irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
#ifdef CONFIG_SMP
irq_desc[IPI_IRQ].action = &ipi_action;
irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
#endif
}
void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
claim_cpu_irqs();
#ifdef CONFIG_SMP
if (!cpu_eiem)
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
void hw_resend_irq(struct hw_interrupt_type *type, unsigned int irq)
{
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
}
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ %d\n", irq);
}
arch/parisc/kernel/module.c
@@ -0,0 +1,822 @@
/* Kernel dynamically loadable module help for PARISC.
*
* The best reference for this stuff is probably the Processor-
* Specific ELF Supplement for PA-RISC:
* http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf
*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 2003 Randolph Chung <tausq at debian . org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* Notes:
* - SEGREL32 handling
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
* if (is_init(me, (void *)val))
* val -= (uint32_t)me->module_init;
* else
* val -= (uint32_t)me->module_core;
* However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset.
*
* The unwind table mechanism has the ability to specify an offset for
* the unwind table; however, because we split off the init functions into
* a different piece of memory, it is not possible to do this using a
* single offset. Instead, we use the above hack for now.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/unwind.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
#define CHECK_RELOC(val, bits) \
if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
me->name, strtab + sym->st_name, (unsigned long)val, bits); \
return -ENOEXEC; \
}
/* Maximum number of GOT entries. We use a long displacement ldd from
* the bottom of the table, which has a maximum signed displacement of
* 0x3fff; however, since we're only going forward, this becomes
* 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
* at most 1023 entries */
#define MAX_GOTS 1023
/* three functions to determine where in the module core
* or init pieces the location is */
static inline int is_init(struct module *me, void *loc)
{
return (loc >= me->module_init &&
loc <= (me->module_init + me->init_size));
}
static inline int is_core(struct module *me, void *loc)
{
return (loc >= me->module_core &&
loc <= (me->module_core + me->core_size));
}
static inline int is_local(struct module *me, void *loc)
{
return is_init(me, loc) || is_core(me, loc);
}
#ifndef __LP64__
struct got_entry {
Elf32_Addr addr;
};
#define Elf_Fdesc Elf32_Fdesc
struct stub_entry {
Elf32_Word insns[2]; /* each stub entry has two insns */
};
#else
struct got_entry {
Elf64_Addr addr;
};
#define Elf_Fdesc Elf64_Fdesc
struct stub_entry {
Elf64_Word insns[4]; /* each stub entry has four insns */
};
#endif
/* Field selection types defined by hppa */
#define rnd(x) (((x)+0x1000)&~0x1fff)
/* fsel: full 32 bits */
#define fsel(v,a) ((v)+(a))
/* lsel: select left 21 bits */
#define lsel(v,a) (((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a) (((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a) (((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
#define mask(x,sz) ((x) & ~((1<<(sz))-1))
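/* These mirror the assembler's L%/R% field selectors: lsel/lrsel yield
   the 21-bit immediate for an ldil, rsel/rrsel the matching low-order
   displacement for an ldo, so "ldil L%x,r ; ldo R%x(r),r" rebuilds x. */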
/* The reassemble_* functions prepare an immediate value for
insertion into an opcode. pa-risc uses all sorts of weird bitfields
in the instruction to hold the value. */
static inline int reassemble_14(int as14)
{
return (((as14 & 0x1fff) << 1) |
((as14 & 0x2000) >> 13));
}
static inline int reassemble_17(int as17)
{
return (((as17 & 0x10000) >> 16) |
((as17 & 0x0f800) << 5) |
((as17 & 0x00400) >> 8) |
((as17 & 0x003ff) << 3));
}
static inline int reassemble_21(int as21)
{
return (((as21 & 0x100000) >> 20) |
((as21 & 0x0ffe00) >> 8) |
((as21 & 0x000180) << 7) |
((as21 & 0x00007c) << 14) |
((as21 & 0x000003) << 12));
}
static inline int reassemble_22(int as22)
{
return (((as22 & 0x200000) >> 21) |
((as22 & 0x1f0000) << 5) |
((as22 & 0x00f800) << 5) |
((as22 & 0x000400) >> 8) |
((as22 & 0x0003ff) << 3));
}
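/* Each helper is the inverse of the bit-scatter the PA instruction
   formats apply to immediates; reassemble_17, for instance, produces the
   in-instruction form of a 17-bit branch displacement. */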
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
return vmalloc(size);
}
#ifndef __LP64__
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
return 0;
}
static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
return 0;
}
static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF32_R_TYPE(rela->r_info)) {
case R_PARISC_PCREL17F:
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
#else
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_LTOFF21L:
case R_PARISC_LTOFF14R:
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_FPTR64:
cnt++;
}
}
return cnt;
}
static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
#endif
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
}
#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
CONST Elf_Shdr *sechdrs,
CONST char *secstrings,
struct module *me)
{
unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
unsigned int i;
for (i = 1; i < hdr->e_shnum; i++) {
const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
if (strncmp(secstrings + sechdrs[i].sh_name,
".PARISC.unwind", 14) == 0)
me->arch.unwind_section = i;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
/* some of these counters are not relevant to both the
* 32-bit and the 64-bit case, but we leave them here to
* keep the code common; the compiler optimizes away the
* ones we don't need
*/
gots += count_gots(rels, nrels);
fdescs += count_fdescs(rels, nrels);
if(strncmp(secstrings + sechdrs[i].sh_name,
".rela.init", 10) == 0)
init_stubs += count_stubs(rels, nrels);
else
stubs += count_stubs(rels, nrels);
}
/* align things a bit */
me->core_size = ALIGN(me->core_size, 16);
me->arch.got_offset = me->core_size;
me->core_size += gots * sizeof(struct got_entry);
me->core_size = ALIGN(me->core_size, 16);
me->arch.fdesc_offset = me->core_size;
me->core_size += fdescs * sizeof(Elf_Fdesc);
me->core_size = ALIGN(me->core_size, 16);
me->arch.stub_offset = me->core_size;
me->core_size += stubs * sizeof(struct stub_entry);
me->init_size = ALIGN(me->init_size, 16);
me->arch.init_stub_offset = me->init_size;
me->init_size += init_stubs * sizeof(struct stub_entry);
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
me->arch.stub_max = stubs;
me->arch.init_stub_max = init_stubs;
return 0;
}
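/* Resulting core layout (sketch):
*
* +------------------+ <- me->module_core
* | text/data/bss |
* +------------------+ <- me->arch.got_offset (16-byte aligned)
* | GOT entries |
* +------------------+ <- me->arch.fdesc_offset (16-byte aligned)
* | function descs |
* +------------------+ <- me->arch.stub_offset (16-byte aligned)
* | branch stubs |
* +------------------+
*
* The init area gets its own stub region at me->arch.init_stub_offset. */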
#ifdef __LP64__
static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
{
unsigned int i;
struct got_entry *got;
value += addend;
BUG_ON(value == 0);
got = me->module_core + me->arch.got_offset;
for (i = 0; got[i].addr; i++)
if (got[i].addr == value)
goto out;
BUG_ON(++me->arch.got_count > me->arch.got_max);
got[i].addr = value;
out:
DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
value);
return i * sizeof(struct got_entry);
}
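/* Note: get_got() returns a byte offset into the GOT rather than an
* address; get_stub() below folds that offset into the 14-bit
* displacement of the stub's first ldd via reassemble_14(). */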
#endif /* __LP64__ */
#ifdef __LP64__
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
return 0;
}
/* Look for existing fdesc entry. */
while (fdesc->addr) {
if (fdesc->addr == value)
return (Elf_Addr)fdesc;
fdesc++;
}
BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);
/* Create new one */
fdesc->addr = value;
fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
return (Elf_Addr)fdesc;
}
#endif /* __LP64__ */
static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
int millicode, int init_section)
{
unsigned long i;
struct stub_entry *stub;
if(init_section) {
i = me->arch.init_stub_count++;
BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
stub = me->module_init + me->arch.init_stub_offset +
i * sizeof(struct stub_entry);
} else {
i = me->arch.stub_count++;
BUG_ON(me->arch.stub_count > me->arch.stub_max);
stub = me->module_core + me->arch.stub_offset +
i * sizeof(struct stub_entry);
}
#ifndef __LP64__
/* for 32-bit the stub looks like this:
* ldil L'XXX,%r1
* be,n R'XXX(%sr4,%r1)
*/
//value = *(unsigned long *)((value + addend) & ~3); /* why? */
stub->insns[0] = 0x20200000; /* ldil L'XXX,%r1 */
stub->insns[1] = 0xe0202002; /* be,n R'XXX(%sr4,%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);
#else
/* for 64-bit we have two kinds of stubs:
* for normal function calls:
* ldd 0(%dp),%dp
* ldd 10(%dp), %r1
* bve (%r1)
* ldd 18(%dp), %dp
*
* for millicode:
* ldil 0, %r1
* ldo 0(%r1), %r1
* ldd 10(%r1), %r1
* bve,n (%r1)
*/
if (!millicode)
{
stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
stub->insns[2] = 0xe820d000; /* bve (%r1) */
stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff);
}
else
{
stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
stub->insns[2] = 0x50210020; /* ldd 10(%r1),%r1 */
stub->insns[3] = 0xe820d002; /* bve,n (%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_14(rrsel(value, addend));
}
#endif
return (Elf_Addr)stub;
}
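/* 64-bit stub mechanics (informal): the GOT slot patched into the
* first ldd of an ordinary stub holds the callee's function descriptor
* address; the stub fetches the entry point from offset 0x10 of that
* descriptor and reloads %dp with the callee's gp from offset 0x18 in
* the branch delay slot. Millicode stubs build the descriptor address
* directly with ldil/ldo instead of a GOT load and skip the %dp
* reload, since millicode does not use the global pointer. */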
int apply_relocate(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
/* parisc should not need this ... */
printk(KERN_ERR "module %s: RELOCATION unsupported\n",
me->name);
return -ENOEXEC;
}
#ifndef __LP64__
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
Elf32_Word *loc;
Elf32_Addr val;
Elf32_Sword addend;
Elf32_Addr dot;
//unsigned long dp = (unsigned long)$global$;
register unsigned long dp asm ("r27");
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
if (!sym->st_value) {
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
dot = (Elf32_Addr)loc & ~0x03;
val = sym->st_value;
addend = rel[i].r_addend;
#if 0
#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
strtab + sym->st_name,
(uint32_t)loc, val, addend,
r(R_PARISC_PLABEL32)
r(R_PARISC_DIR32)
r(R_PARISC_DIR21L)
r(R_PARISC_DIR14R)
r(R_PARISC_SEGREL32)
r(R_PARISC_DPREL21L)
r(R_PARISC_DPREL14R)
r(R_PARISC_PCREL17F)
r(R_PARISC_PCREL22F)
"UNKNOWN");
#undef r
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_PARISC_PLABEL32:
/* 32-bit function address */
/* no function descriptors... */
*loc = fsel(val, addend);
break;
case R_PARISC_DIR32:
/* direct 32-bit ref */
*loc = fsel(val, addend);
break;
case R_PARISC_DIR21L:
/* left 21 bits of effective address */
val = lrsel(val, addend);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_DIR14R:
/* right 14 bits of effective address */
val = rrsel(val, addend);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_SEGREL32:
/* 32-bit segment relative address */
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
*loc = fsel(val, addend);
break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_DPREL14R:
/* right 14 bit of relative address */
val = rrsel(val - dp, addend);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL17F:
/* 17-bit PC relative address */
val = get_stub(me, val, addend, 0, is_init(me, loc));
val = (val - dot - 8)/4;
CHECK_RELOC(val, 17)
*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
break;
case R_PARISC_PCREL22F:
/* 22-bit PC relative address; only defined for pa20 */
val = get_stub(me, val, addend, 0, is_init(me, loc));
DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
strtab + sym->st_name, (unsigned long)loc, addend,
val);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 22);
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#else
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
int i;
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
Elf64_Word *loc;
Elf64_Xword *loc64;
Elf64_Addr val;
Elf64_Sxword addend;
Elf64_Addr dot;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
if (!sym->st_value) {
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
dot = (Elf64_Addr)loc & ~0x03;
loc64 = (Elf64_Xword *)loc;
val = sym->st_value;
addend = rel[i].r_addend;
#if 0
#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
strtab + sym->st_name,
loc, val, addend,
r(R_PARISC_LTOFF14R)
r(R_PARISC_LTOFF21L)
r(R_PARISC_PCREL22F)
r(R_PARISC_DIR64)
r(R_PARISC_SEGREL32)
r(R_PARISC_FPTR64)
"UNKNOWN");
#undef r
#endif
switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_PARISC_LTOFF21L:
/* LT-relative; left 21 bits */
val = get_got(me, val, addend);
DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
val = lrsel(val, 0);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_LTOFF14R:
/* L(ltoff(val+addend)) */
/* LT-relative; right 14 bits */
val = get_got(me, val, addend);
val = rrsel(val, 0);
DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL22F:
/* PC-relative; 22 bits */
DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
/* can we reach it locally? */
if(!is_local(me, (void *)val)) {
if (strncmp(strtab + sym->st_name, "$$", 2)
== 0)
val = get_stub(me, val, addend, 1,
is_init(me, loc));
else
val = get_stub(me, val, addend, 0,
is_init(me, loc));
}
DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
strtab + sym->st_name, loc, sym->st_value,
addend, val);
/* FIXME: local symbols work as long as the
* core and init pieces aren't separated too
* far. If this is ever broken, you will trip
* the check below. The way to fix it would
* be to generate local stubs to go between init
* and core */
if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
(Elf64_Sxword)(val - dot - 8) < -0x800000) {
printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
me->name, strtab + sym->st_name);
return -ENOEXEC;
}
val = (val - dot - 8)/4;
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
case R_PARISC_DIR64:
/* 64-bit effective address */
*loc64 = val + addend;
break;
case R_PARISC_SEGREL32:
/* 32-bit segment relative address */
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
*loc = fsel(val, addend);
break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if(is_local(me, (void *)(val + addend))) {
*loc64 = get_fdesc(me, val+addend);
DEBUGP("FDESC for %s at %p points to %lx\n",
strtab + sym->st_name, *loc64,
((Elf_Fdesc *)*loc64)->addr);
} else {
/* if the symbol is not local to this
* module then val+addend is a pointer
* to the function descriptor */
DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
*loc64 = val + addend;
}
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#endif
static void
register_unwind_table(struct module *me,
const Elf_Shdr *sechdrs)
{
unsigned char *table, *end;
unsigned long gp;
if (!me->arch.unwind_section)
return;
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
end = table + sechdrs[me->arch.unwind_section].sh_size;
gp = (Elf_Addr)me->module_core + me->arch.got_offset;
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
}
static void
deregister_unwind_table(struct module *me)
{
if (me->arch.unwind)
unwind_table_remove(me->arch.unwind);
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
int i;
unsigned long nsyms;
const char *strtab = NULL;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
Elf_Fdesc *entry;
u32 *addr;
entry = (Elf_Fdesc *)me->init;
printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
entry->gp, entry->addr);
addr = (u32 *)entry->addr;
printk("INSNS: %x %x %x %x\n",
addr[0], addr[1], addr[2], addr[3]);
printk("stubs used %ld, stubs max %ld\n"
"init_stubs used %ld, init stubs max %ld\n"
"got entries used %ld, gots max %ld\n"
"fdescs used %ld, fdescs max %ld\n",
me->arch.stub_count, me->arch.stub_max,
me->arch.init_stub_count, me->arch.init_stub_max,
me->arch.got_count, me->arch.got_max,
me->arch.fdesc_count, me->arch.fdesc_max);
#endif
register_unwind_table(me, sechdrs);
/* haven't filled in me->symtab yet, so have to find it
* ourselves */
for (i = 1; i < hdr->e_shnum; i++) {
if(sechdrs[i].sh_type == SHT_SYMTAB
&& (sechdrs[i].sh_flags & SHF_ALLOC)) {
int strindex = sechdrs[i].sh_link;
/* FIXME: AWFUL HACK
* The cast is to drop the const from
* the sechdrs pointer */
symhdr = (Elf_Shdr *)&sechdrs[i];
strtab = (char *)sechdrs[strindex].sh_addr;
break;
}
}
DEBUGP("module %s: strtab %p, symhdr %p\n",
me->name, strtab, symhdr);
if(me->arch.got_count > MAX_GOTS) {
printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d\n", me->name, me->arch.got_count, MAX_GOTS);
return -EINVAL;
}
/* no symbol table */
if(symhdr == NULL)
return 0;
oldptr = (void *)symhdr->sh_addr;
newptr = oldptr + 1; /* we start counting at 1 */
nsyms = symhdr->sh_size / sizeof(Elf_Sym);
DEBUGP("OLD num_symtab %lu\n", nsyms);
for (i = 1; i < nsyms; i++) {
oldptr++; /* note, count starts at 1 so preincrement */
if(strncmp(strtab + oldptr->st_name,
".L", 2) == 0)
continue;
if(newptr != oldptr)
*newptr++ = *oldptr;
else
newptr++;
}
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
}

View File

@@ -0,0 +1,49 @@
/*
* linux/arch/parisc/kernel/pa7300lc.c
* - PA7300LC-specific functions
*
* Copyright (C) 2000 Philipp Rumpf */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
/* CPU register indices */
#define MIOC_STATUS 0xf040
#define MIOC_CONTROL 0xf080
#define MDERRADD 0xf0e0
#define DMAERR 0xf0e8
#define DIOERR 0xf0ec
#define HIDMAMEM 0xf0f4
/* this returns the HPA of the CPU it was called on (currently hardcoded) */
static u32 cpu_hpa(void)
{
return 0xfffb0000;
}
static void pa7300lc_lpmc(int code, struct pt_regs *regs)
{
u32 hpa;
printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id());
show_regs(regs);
hpa = cpu_hpa();
printk(KERN_WARNING
"MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n"
"MDERRADD %08x\n" "DMAERR %08x\n"
"DIOERR %08x\n" "HIDMAMEM %08x\n",
gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),
gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),
gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));
}
void pa7300lc_init(void)
{
cpu_lpmc = pa7300lc_lpmc;
}

1086
arch/parisc/kernel/pacache.S Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,187 @@
/*
* Architecture-specific kernel symbols
*
* Copyright (C) 2000-2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Dave Kennedy
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
* Copyright (C) 2002-2003 Helge Deller <deller with parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/string.h>
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strpbrk);
#include <linux/pm.h>
EXPORT_SYMBOL(pm_power_off);
#include <asm/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u32);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
#ifdef __LP64__
EXPORT_SYMBOL(__xchg64);
EXPORT_SYMBOL(__cmpxchg_u64);
#endif
#include <asm/uaccess.h>
EXPORT_SYMBOL(lstrncpy_from_user);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
/* Global fixups */
extern void fixup_get_user_skip_1(void);
extern void fixup_get_user_skip_2(void);
extern void fixup_put_user_skip_1(void);
extern void fixup_put_user_skip_2(void);
EXPORT_SYMBOL(fixup_get_user_skip_1);
EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);
EXPORT_SYMBOL(fixup_put_user_skip_2);
#ifndef __LP64__
/* Needed so insmod can set dp value */
extern int $global$;
EXPORT_SYMBOL($global$);
#endif
#include <asm/io.h>
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);
#include <asm/unistd.h>
EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_lseek);
EXPORT_SYMBOL(sys_read);
EXPORT_SYMBOL(sys_write);
#include <asm/semaphore.h>
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__down);
extern void $$divI(void);
extern void $$divU(void);
extern void $$remI(void);
extern void $$remU(void);
extern void $$mulI(void);
extern void $$divU_3(void);
extern void $$divU_5(void);
extern void $$divU_6(void);
extern void $$divU_9(void);
extern void $$divU_10(void);
extern void $$divU_12(void);
extern void $$divU_7(void);
extern void $$divU_14(void);
extern void $$divU_15(void);
extern void $$divI_3(void);
extern void $$divI_5(void);
extern void $$divI_6(void);
extern void $$divI_7(void);
extern void $$divI_9(void);
extern void $$divI_10(void);
extern void $$divI_12(void);
extern void $$divI_14(void);
extern void $$divI_15(void);
EXPORT_SYMBOL($$divI);
EXPORT_SYMBOL($$divU);
EXPORT_SYMBOL($$remI);
EXPORT_SYMBOL($$remU);
EXPORT_SYMBOL($$mulI);
EXPORT_SYMBOL($$divU_3);
EXPORT_SYMBOL($$divU_5);
EXPORT_SYMBOL($$divU_6);
EXPORT_SYMBOL($$divU_9);
EXPORT_SYMBOL($$divU_10);
EXPORT_SYMBOL($$divU_12);
EXPORT_SYMBOL($$divU_7);
EXPORT_SYMBOL($$divU_14);
EXPORT_SYMBOL($$divU_15);
EXPORT_SYMBOL($$divI_3);
EXPORT_SYMBOL($$divI_5);
EXPORT_SYMBOL($$divI_6);
EXPORT_SYMBOL($$divI_7);
EXPORT_SYMBOL($$divI_9);
EXPORT_SYMBOL($$divI_10);
EXPORT_SYMBOL($$divI_12);
EXPORT_SYMBOL($$divI_14);
EXPORT_SYMBOL($$divI_15);
extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
#ifdef __LP64__
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __umoddi3(void);
extern void __moddi3(void);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__moddi3);
#endif
#ifndef __LP64__
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
#ifdef CONFIG_DISCONTIGMEM
#include <asm/mmzone.h>
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfnnid_map);
#endif

View File

@@ -0,0 +1,578 @@
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h> /* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
static unsigned long pcxl_used_pages = 0;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
/*
** Dump a hex representation of the resource map.
*/
#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
u_long *res_ptr = (unsigned long *)pcxl_res_map;
u_long i = 0;
printk("res_map: ");
for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
printk("%08lx ", *res_ptr);
printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
static int pa11_dma_supported( struct device *dev, u64 mask)
{
return 1;
}
static inline int map_pte_uncached(pte_t * pte,
unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
{
unsigned long end;
unsigned long orig_vaddr = vaddr;
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
purge_tlb_start();
pdtlb_kernel(orig_vaddr);
purge_tlb_end();
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
(*paddr_ptr) += PAGE_SIZE;
pte++;
} while (vaddr < end);
return 0;
}
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
{
unsigned long end;
unsigned long orig_vaddr = vaddr;
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
if (!pte)
return -ENOMEM;
if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
return -ENOMEM;
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
orig_vaddr += PMD_SIZE;
pmd++;
} while (vaddr < end);
return 0;
}
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
unsigned long paddr)
{
pgd_t * dir;
unsigned long end = vaddr + size;
dir = pgd_offset_k(vaddr);
do {
pmd_t *pmd;
pmd = pmd_alloc(NULL, dir, vaddr);
if (!pmd)
return -ENOMEM;
if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
return -ENOMEM;
vaddr = vaddr + PGDIR_SIZE;
dir++;
} while (vaddr && (vaddr < end));
return 0;
}
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
unsigned long size)
{
pte_t * pte;
unsigned long end;
unsigned long orig_vaddr = vaddr;
if (pmd_none(*pmd))
return;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
return;
}
pte = pte_offset_map(pmd, vaddr);
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
pte_t page = *pte;
pte_clear(&init_mm, vaddr, pte);
purge_tlb_start();
pdtlb_kernel(orig_vaddr);
purge_tlb_end();
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
pte++;
if (pte_none(page) || pte_present(page))
continue;
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
} while (vaddr < end);
}
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
unsigned long size)
{
pmd_t * pmd;
unsigned long end;
unsigned long orig_vaddr = vaddr;
if (pgd_none(*dir))
return;
if (pgd_bad(*dir)) {
pgd_ERROR(*dir);
pgd_clear(dir);
return;
}
pmd = pmd_offset(dir, vaddr);
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
orig_vaddr += PMD_SIZE;
pmd++;
} while (vaddr < end);
}
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
pgd_t * dir;
unsigned long end = vaddr + size;
dir = pgd_offset_k(vaddr);
do {
unmap_uncached_pmd(dir, vaddr, end - vaddr);
vaddr = vaddr + PGDIR_SIZE;
dir++;
} while (vaddr && (vaddr < end));
}
#define PCXL_SEARCH_LOOP(idx, mask, size) \
for(; res_ptr < res_end; ++res_ptr) \
{ \
if(0 == ((*res_ptr) & mask)) { \
*res_ptr |= mask; \
idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
pcxl_res_hint = idx + (size >> 3); \
goto resource_found; \
} \
}
#define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
PCXL_SEARCH_LOOP(idx, mask, size); \
res_ptr = (u##size *)&pcxl_res_map[0]; \
PCXL_SEARCH_LOOP(idx, mask, size); \
}
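/* The two passes above make this a simple first-fit allocator over the
* resource bitmap: scan forward from the rotating hint, and if nothing
* is free there, wrap around and scan again from the start of the map.
* The u8/u16/u32 access width is chosen by the caller so that a mask of
* up to 8, 16 or 32 page bits can be tested in a single load. */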
unsigned long
pcxl_alloc_range(size_t size)
{
int res_idx;
u_long mask, flags;
unsigned int pages_needed = size >> PAGE_SHIFT;
mask = (u_long) -1L;
mask >>= BITS_PER_LONG - pages_needed;
DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
size, pages_needed, mask);
spin_lock_irqsave(&pcxl_res_lock, flags);
if(pages_needed <= 8) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
} else if(pages_needed <= 16) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
} else if(pages_needed <= 32) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
} else {
panic("%s: pcxl_alloc_range() Too many pages to map.\n",
__FILE__);
}
dump_resmap();
panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
__FILE__);
resource_found:
DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
res_idx, mask, pcxl_res_hint);
pcxl_used_pages += pages_needed;
pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
spin_unlock_irqrestore(&pcxl_res_lock, flags);
dump_resmap();
/*
** return the corresponding vaddr in the pcxl dma map
*/
return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
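/* Index-to-address math (illustrative, assuming PAGE_SHIFT == 12):
* each byte of the bitmap covers 8 pages, so res_idx << (12 + 3)
* converts a byte index into a 32KB-granular offset inside the pcxl
* dma window. */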
#define PCXL_FREE_MAPPINGS(idx, m, size) \
u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
/* BUG_ON((*res_ptr & m) != m); */ \
*res_ptr &= ~m;
/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
u_long mask, flags;
unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
unsigned int pages_mapped = size >> PAGE_SHIFT;
mask = (u_long) -1L;
mask >>= BITS_PER_LONG - pages_mapped;
DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
res_idx, size, pages_mapped, mask);
spin_lock_irqsave(&pcxl_res_lock, flags);
if(pages_mapped <= 8) {
PCXL_FREE_MAPPINGS(res_idx, mask, 8);
} else if(pages_mapped <= 16) {
PCXL_FREE_MAPPINGS(res_idx, mask, 16);
} else if(pages_mapped <= 32) {
PCXL_FREE_MAPPINGS(res_idx, mask, 32);
} else {
panic("%s: pcxl_free_range() Too many pages to unmap.\n",
__FILE__);
}
pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
spin_unlock_irqrestore(&pcxl_res_lock, flags);
dump_resmap();
}
static int __init
pcxl_dma_init(void)
{
if (pcxl_dma_start == 0)
return 0;
spin_lock_init(&pcxl_res_lock);
pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
pcxl_res_hint = 0;
pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
get_order(pcxl_res_size));
memset(pcxl_res_map, 0, pcxl_res_size);
proc_gsc_root = proc_mkdir("gsc", 0);
create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info);
return 0;
}
__initcall(pcxl_dma_init);
static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
{
unsigned long vaddr;
unsigned long paddr;
int order;
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
vaddr = pcxl_alloc_range(size);
paddr = __get_free_pages(flag, order);
flush_kernel_dcache_range(paddr, size);
paddr = __pa(paddr);
map_uncached_pages(vaddr, size, paddr);
*dma_handle = (dma_addr_t) paddr;
#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
if (!dev || dev->coherent_dma_mask < 0xffffffff)
flag |= GFP_DMA;
#endif
return (void *)vaddr;
}
static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
int order;
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
unmap_uncached_pages((unsigned long)vaddr, size);
pcxl_free_range((unsigned long)vaddr, size);
free_pages((unsigned long)__va(dma_handle), order);
}
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
if (direction == DMA_NONE) {
printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
BUG();
}
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
}
static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
if (direction == DMA_NONE) {
printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
BUG();
}
if (direction == DMA_TO_DEVICE)
return;
/*
* For PCI_DMA_FROMDEVICE this flush is not necessary for the
* simple map/unmap case. However, it IS necessary if
* pci_dma_sync_single_* has been called and the buffer reused.
*/
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
return;
}
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
if (direction == DMA_NONE)
BUG();
for (i = 0; i < nents; i++, sglist++ ) {
unsigned long vaddr = sg_virt_addr(sglist);
sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sglist) = sglist->length;
flush_kernel_dcache_range(vaddr, sglist->length);
}
return nents;
}
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
if (direction == DMA_NONE)
BUG();
if (direction == DMA_TO_DEVICE)
return;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
return;
}
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
if (direction == DMA_NONE)
BUG();
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
if (direction == DMA_NONE)
BUG();
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
struct hppa_dma_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc_consistent = pa11_dma_alloc_consistent,
.alloc_noncoherent = pa11_dma_alloc_consistent,
.free_consistent = pa11_dma_free_consistent,
.map_single = pa11_dma_map_single,
.unmap_single = pa11_dma_unmap_single,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
static void *fail_alloc_consistent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
return NULL;
}
static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
void *addr = NULL;
/* rely on kmalloc to be cacheline aligned */
addr = kmalloc(size, flag);
if(addr)
*dma_handle = (dma_addr_t)virt_to_phys(addr);
return addr;
}
static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t iova)
{
kfree(vaddr);
return;
}
struct hppa_dma_ops pcx_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc_consistent = fail_alloc_consistent,
.alloc_noncoherent = pa11_dma_alloc_noncoherent,
.free_consistent = pa11_dma_free_noncoherent,
.map_single = pa11_dma_map_single,
.unmap_single = pa11_dma_unmap_single,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
static int pcxl_proc_info(char *buf, char **start, off_t offset, int len)
{
u_long i = 0;
unsigned long *res_ptr = (u_long *)pcxl_res_map;
unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */
sprintf(buf, "\nDMA Mapping Area size : %d bytes (%d pages)\n",
PCXL_DMA_MAP_SIZE,
(pcxl_res_size << 3) ); /* 1 bit per page */
sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
buf, pcxl_res_size, pcxl_res_size << 3); /* 8 bits per byte */
strcat(buf, " total: free: used: % used:\n");
sprintf(buf, "%sblocks %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size,
pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
(pcxl_used_bytes * 100) / pcxl_res_size);
sprintf(buf, "%spages %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
total_pages - pcxl_used_pages, pcxl_used_pages,
(pcxl_used_pages * 100 / total_pages));
strcat(buf, "\nResource bitmap:");
for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
if ((i & 7) == 0)
strcat(buf,"\n ");
sprintf(buf, "%s %08lx", buf, *res_ptr);
}
strcat(buf, "\n");
return strlen(buf);
}

346
arch/parisc/kernel/pci.c Normal file
View File

@@ -0,0 +1,346 @@
/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH
* Copyright (C) 1999-2001 Hewlett-Packard Company
* Copyright (C) 1999-2001 Grant Grundler
*/
#include <linux/config.h>
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(x...) printk(KERN_DEBUG x)
#else
# define DBGC(x...)
#endif
#if DEBUG_RESOURCES
#define DBG_RES(x...) printk(KERN_DEBUG x)
#else
#define DBG_RES(x...)
#endif
/* To be used as: mdelay(pci_post_reset_delay);
*
* post_reset is the time the kernel should stall to prevent anyone from
* accessing the PCI bus once #RESET is de-asserted.
* PCI spec somewhere says 1 second but with multi-PCI bus systems,
* this makes the boot time much longer than necessary.
* 20ms seems to work for all the HP PCI implementations to date.
*
* XXX: turn into a #defined constant in <asm/pci.h> ?
*/
int pci_post_reset_delay = 50;
struct pci_port_ops *pci_port;
struct pci_bios_ops *pci_bios;
int pci_hba_count = 0;
/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
#define PCI_HBA_MAX 32
struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX];
/********************************************************************
**
** I/O port space support
**
*********************************************************************/
/* EISA port numbers and PCI port numbers share the same interface. Some
* machines have both EISA and PCI adapters installed. Rather than turn
* pci_port into an array, we reserve bus 0 for EISA and call the EISA
* routines if the access is to a port on bus 0. We don't want to fix
* EISA and ISA drivers which assume port space is <= 0xffff.
*/
#ifdef CONFIG_EISA
#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
#else
#define EISA_IN(size)
#define EISA_OUT(size)
#endif
#define PCI_PORT_IN(type, size) \
u##size in##type (int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
EISA_IN(size); \
if (!parisc_pci_hba[b]) return (u##size) -1; \
return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
} \
EXPORT_SYMBOL(in##type);
PCI_PORT_IN(b, 8)
PCI_PORT_IN(w, 16)
PCI_PORT_IN(l, 32)
#define PCI_PORT_OUT(type, size) \
void out##type (u##size d, int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
EISA_OUT(size); \
if (!parisc_pci_hba[b]) return; \
pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
} \
EXPORT_SYMBOL(out##type);
PCI_PORT_OUT(b, 8)
PCI_PORT_OUT(w, 16)
PCI_PORT_OUT(l, 32)
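/* For example, PCI_PORT_IN(b, 8) above expands (roughly) to:
*
* u8 inb(int addr)
* {
* int b = PCI_PORT_HBA(addr);
* if (EISA_bus && b == 0) return eisa_in8(addr); (CONFIG_EISA only)
* if (!parisc_pci_hba[b]) return (u8) -1;
* return pci_port->inb(parisc_pci_hba[b], PCI_PORT_ADDR(addr));
* }
*
* plus an EXPORT_SYMBOL(inb), giving PA-RISC its inb/inw/inl and
* outb/outw/outl port-I/O entry points. */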
/*
* BIOS32 replacement.
*/
static int __init pcibios_init(void)
{
if (!pci_bios)
return -1;
if (pci_bios->init) {
pci_bios->init();
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
return 0;
}
/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
if (pci_bios->fixup_bus) {
pci_bios->fixup_bus(bus);
} else {
printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
}
}
char *pcibios_setup(char *str)
{
return str;
}
/*
* Called by pci_set_master() - a driver interface.
*
* Legacy PDC guarantees to set:
* Map Memory BAR's into PA IO space.
* Map Expansion ROM BAR into one common PA IO space per bus.
* Map IO BAR's into PCI IO space.
* Command (see below)
* Cache Line Size
* Latency Timer
* Interrupt Line
* PPB: secondary latency timer, io/mmio base/limit,
* bus numbers, bridge control
*
*/
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
/* If someone already mucked with this, don't touch it. */
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat >= 16) return;
/*
** HP generally has fewer devices on the bus than other architectures.
** The upper byte is PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
(0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
}
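/* Illustrative encoding, assuming L1_CACHE_BYTES == 32: the 16-bit
* write above is (0x80 << 8) | (32 / 4) = 0x8008, i.e. latency timer
* 0x80 in the upper byte and a cache line size of 8 dwords in the
* lower byte. */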
void __init pcibios_init_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
unsigned short bridge_ctl;
/* We deal only with pci controllers and pci-pci bridges. */
if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;
/* PCI-PCI bridge - set the cache line and default latency
(32) for primary and secondary buses. */
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}
/* KLUGE: Link the child and parent resources - generic PCI didn't */
static void
pcibios_link_hba_resources( struct resource *hba_res, struct resource *r)
{
if (!r->parent) {
printk(KERN_EMERG "PCI: Tell willy he's wrong\n");
r->parent = hba_res;
/* reverse link is harder *sigh* */
if (r->parent->child) {
/* append to the tail of the parent's child list */
struct resource *next = r->parent->child;
while (next->sibling)
next = next->sibling;
next->sibling = r;
} else
r->parent->child = r;
}
}
/* called by drivers/pci/setup-bus.c:pci_setup_bridge(). */
void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
struct pci_bus_region *region, struct resource *res)
{
struct pci_bus *bus = dev->bus;
struct pci_hba_data *hba = HBA_DATA(bus->bridge->platform_data);
if (res->flags & IORESOURCE_IO) {
/*
** I/O space may see bus numbers here, in the form
** 0xbbxxxx, where bb is the bus number and xxxx is
** the I/O port space address. The remaining address
** translation is done in the PCI host adapter
** specific code, e.g. dino_out8.
*/
region->start = PCI_PORT_ADDR(res->start);
region->end = PCI_PORT_ADDR(res->end);
} else if (res->flags & IORESOURCE_MEM) {
/* Convert MMIO addr to PCI addr (undo global virtualization) */
region->start = PCI_BUS_ADDR(hba, res->start);
region->end = PCI_BUS_ADDR(hba, res->end);
}
DBG_RES("pcibios_resource_to_bus(%02x %s [%lx,%lx])\n",
bus->number, res->flags & IORESOURCE_IO ? "IO" : "MEM",
region->start, region->end);
/* KLUGE ALERT
** if this resource isn't linked to a "parent", then it seems
** to be a child of the HBA - lets link it in.
*/
pcibios_link_hba_resources(&hba->io_space, bus->resource[0]);
pcibios_link_hba_resources(&hba->lmmio_space, bus->resource[1]);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
#endif
/*
* pcibios_align_resource() is called every time the generic PCI code
* wants to generate a new address. While looking for an available
* address, each candidate is first "aligned" and then checked for
* availability, until a match is found.
*
* Since we are just checking candidates, don't use any fields other
* than res->start.
*/
void pcibios_align_resource(void *data, struct resource *res,
unsigned long size, unsigned long alignment)
{
unsigned long mask, align;
DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
pci_name(((struct pci_dev *) data)),
res->parent, res->start, res->end,
(int) res->flags, size, alignment);
/* If it's not IO, then it's gotta be MEM */
align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
/* Align to largest of MIN or input size */
mask = max(alignment, align) - 1;
res->start += mask;
res->start &= ~mask;
/* The caller updates the end field, we don't. */
}
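/* Worked example: with res->start == 0x3005, alignment == 0x1000 and
* PCIBIOS_MIN_MEM no larger than that, mask == 0x0fff and
* (0x3005 + 0x0fff) & ~0x0fff == 0x4000, the next aligned candidate. */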
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
* We enable the port IO and memory IO bits if the device has any BARs of
* that type, and we enable the PERR and SERR bits unconditionally.
* Drivers that do not need parity (eg graphics and possibly networking)
* can clear these bits if they want.
*/
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd;
int idx;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
for (idx = 0; idx < DEVICE_COUNT_RESOURCE; idx++) {
struct resource *r = &dev->resource[idx];
/* only setup requested resources */
if (!(mask & (1<<idx)))
continue;
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
#if 0
/* If bridge/bus controller has FBB enabled, child must too. */
if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
cmd |= PCI_COMMAND_FAST_BACK;
#endif
DBGC("PCIBIOS: Enabling device %s cmd 0x%04x\n", pci_name(dev), cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
return 0;
}
/* PA-RISC specific */
void pcibios_register_hba(struct pci_hba_data *hba)
{
if (pci_hba_count >= PCI_HBA_MAX) {
printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
return;
}
parisc_pci_hba[pci_hba_count] = hba;
hba->hba_num = pci_hba_count++;
}
subsys_initcall(pcibios_init);

View File

@@ -0,0 +1,245 @@
/*
* interfaces to log Chassis Codes via PDC (firmware)
*
* Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
* Copyright (C) 2002-2004 Thibaut VARENE <varenet@parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef PDC_CHASSIS_DEBUG
#ifdef PDC_CHASSIS_DEBUG
#define DPRINTK(fmt, args...) printk(fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#ifdef CONFIG_PDC_CHASSIS
static int pdc_chassis_old = 0;
static unsigned int pdc_chassis_enabled = 1;
/**
* pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
* @str: configuration param: 0 to disable chassis log
* @return 1
*/
static int __init pdc_chassis_setup(char *str)
{
/*panic_timeout = simple_strtoul(str, NULL, 0);*/
get_option(&str, &pdc_chassis_enabled);
return 1;
}
__setup("pdcchassis=", pdc_chassis_setup);
/**
* pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
* @pdc_chassis_old: 1 if old pdc chassis style
*
* Currently, only E class and A180 are known to work with this.
* Inspired by Christoph Plattner
*/
static void __init pdc_chassis_checkold(void)
{
switch(CPU_HVERSION) {
case 0x480: /* E25 */
case 0x481: /* E35 */
case 0x482: /* E45 */
case 0x483: /* E55 */
case 0x516: /* A180 */
pdc_chassis_old = 1;
break;
default:
break;
}
DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old);
}
/**
* pdc_chassis_panic_event() - Called by the panic handler.
*
* As soon as a panic occurs, we should inform the PDC.
*/
static int pdc_chassis_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
return NOTIFY_DONE;
}
static struct notifier_block pdc_chassis_panic_block = {
.notifier_call = pdc_chassis_panic_event,
.priority = INT_MAX,
};
/**
* pdc_chassis_reboot_event() - Called by the reboot handler.
*
* As soon as a reboot occurs, we should inform the PDC.
*/
static int pdc_chassis_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
return NOTIFY_DONE;
}
static struct notifier_block pdc_chassis_reboot_block = {
.notifier_call = pdc_chassis_reboot_event,
.priority = INT_MAX,
};
#endif /* CONFIG_PDC_CHASSIS */
/**
* parisc_pdc_chassis_init() - Called at boot time.
*/
void __init parisc_pdc_chassis_init(void)
{
#ifdef CONFIG_PDC_CHASSIS
int handle = 0;
if (pdc_chassis_enabled) {
DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
/* Let's see if we have something to handle... */
/* Check for PDC_PAT or old LED Panel */
pdc_chassis_checkold();
if (is_pdc_pat()) {
printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
handle = 1;
}
else if (pdc_chassis_old) {
printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
handle = 1;
}
if (handle) {
/* initialize panic notifier chain */
notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
/* initialize reboot notifier chain */
register_reboot_notifier(&pdc_chassis_reboot_block);
}
}
#endif /* CONFIG_PDC_CHASSIS */
}
/**
* pdc_chassis_send_status() - Sends a predefined message to the chassis,
* and changes the front panel LEDs according to the new system state
* @message: the predefined message to send
*
* Only machines with 64-bit PDC PAT and those reported by
* pdc_chassis_checkold() are supported at the moment.
*
* Returns 0 if no error, -1 if no supported PDC is present or the
* message is invalid, else returns the appropriate PDC error code.
*
* For a list of predefined messages, see asm-parisc/pdc_chassis.h
*/
int pdc_chassis_send_status(int message)
{
/* Maybe we should do this another way? */
int retval = 0;
#ifdef CONFIG_PDC_CHASSIS
if (pdc_chassis_enabled) {
DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
switch(message) {
case PDC_CHASSIS_DIRECT_BSTART:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
break;
case PDC_CHASSIS_DIRECT_BCOMPLETE:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
break;
case PDC_CHASSIS_DIRECT_SHUTDOWN:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
break;
case PDC_CHASSIS_DIRECT_PANIC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
break;
case PDC_CHASSIS_DIRECT_LPMC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
break;
case PDC_CHASSIS_DIRECT_HPMC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
break;
default:
retval = -1;
}
} else retval = -1;
#else
if (pdc_chassis_old) {
switch (message) {
case PDC_CHASSIS_DIRECT_BSTART:
case PDC_CHASSIS_DIRECT_BCOMPLETE:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
break;
case PDC_CHASSIS_DIRECT_SHUTDOWN:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
break;
case PDC_CHASSIS_DIRECT_HPMC:
case PDC_CHASSIS_DIRECT_PANIC:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
break;
case PDC_CHASSIS_DIRECT_LPMC:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
break;
default:
retval = -1;
}
} else retval = -1;
#endif /* CONFIG_64BIT */
} /* if (pdc_chassis_enabled) */
#endif /* CONFIG_PDC_CHASSIS */
return retval;
}

View File

@@ -0,0 +1,189 @@
/*
* PDC Console support - ie use firmware to dump text via boot console
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* The PDC console is a simple console, which can be used for debugging
* boot related problems on HP PA-RISC machines.
*
* This code uses the ROM (=PDC) based functions to read and write characters
* from and to PDC's boot path.
* Since all characters read from that path must be polled, this code can
* never be a fully functional Linux console.
*/
/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
* On production kernels EARLY_BOOTUP_DEBUG should be undefined. */
#undef EARLY_BOOTUP_DEBUG
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/tty.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
while(count--)
pdc_iodc_putc(*s++);
}
void pdc_outc(unsigned char c)
{
pdc_iodc_outc(c);
}
void pdc_printf(const char *fmt, ...)
{
va_list args;
char buf[1024];
int i, len;
va_start(args, fmt);
len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
for (i = 0; i < len; i++)
pdc_iodc_outc(buf[i]);
}
int pdc_console_poll_key(struct console *co)
{
return pdc_iodc_getc();
}
static int pdc_console_setup(struct console *co, char *options)
{
return 0;
}
#if defined(CONFIG_PDC_CONSOLE)
#define PDC_CONSOLE_DEVICE pdc_console_device
static struct tty_driver * pdc_console_device (struct console *c, int *index)
{
extern struct tty_driver console_driver;
*index = c->index ? c->index-1 : fg_console;
return &console_driver;
}
#else
#define PDC_CONSOLE_DEVICE NULL
#endif
static struct console pdc_cons = {
.name = "ttyB",
.write = pdc_console_write,
.device = PDC_CONSOLE_DEVICE,
.setup = pdc_console_setup,
.flags = CON_BOOT|CON_PRINTBUFFER|CON_ENABLED,
.index = -1,
};
static int pdc_console_initialized;
extern unsigned long con_start; /* kernel/printk.c */
extern unsigned long log_end; /* kernel/printk.c */
static void pdc_console_init_force(void)
{
if (pdc_console_initialized)
return;
++pdc_console_initialized;
/* If the console is duplex then copy the COUT parameters to CIN. */
if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));
/* register the pdc console */
register_console(&pdc_cons);
}
void __init pdc_console_init(void)
{
#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE)
pdc_console_init_force();
#endif
#ifdef EARLY_BOOTUP_DEBUG
printk(KERN_INFO "Initialized PDC Console for debugging.\n");
#endif
}
/* Unregister the pdc console with the printk console layer */
void pdc_console_die(void)
{
if (!pdc_console_initialized)
return;
--pdc_console_initialized;
printk(KERN_INFO "Switching from PDC console\n");
/* Don't repeat what we've already printed */
con_start = log_end;
unregister_console(&pdc_cons);
}
/*
* Used for emergencies. Currently only used if an HPMC occurs. If an
* HPMC occurs, it is possible that the current console may not be
* properly initialized after the PDC IO reset. This routine unregisters all
* of the current consoles, reinitializes the pdc console and
* registers it.
*/
void pdc_console_restart(void)
{
struct console *console;
if (pdc_console_initialized)
return;
while ((console = console_drivers) != NULL)
unregister_console(console_drivers);
/* Don't repeat what we've already printed */
con_start = log_end;
/* force registering the pdc console */
pdc_console_init_force();
}

841
arch/parisc/kernel/perf.c Normal file
View File

@@ -0,0 +1,841 @@
/*
* Parisc performance counters
* Copyright (C) 2001 Randolph Chung <tausq@debian.org>
*
* This code is derived, with permission, from HP/UX sources.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Edited comment from original sources:
*
* This driver programs the PCX-U/PCX-W performance counters
* on the PA-RISC 2.0 chips. The driver keeps all images now
* internally to the kernel to hopefully eliminate the possibility
* of a bad image halting the CPU. Also, there are different
* images for the PCX-W and later chips vs the PCX-U chips.
*
* Only 1 process is allowed to access the driver at any time,
* so the only protection that is needed is at open and close.
* A variable "perf_enabled" is used to hold the state of the
* driver. The spinlock "perf_lock" is used to protect the
* modification of the state during open/close operations so
* multiple processes don't get into the driver simultaneously.
*
* This driver accesses the processor directly vs going through
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
* on every box.
*/
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/perf.h>
#include <asm/parisc-device.h>
#include <asm/processor.h>
#include <asm/runway.h>
#include <asm/io.h> /* for __raw_read() */
#include "perf_images.h"
#define MAX_RDR_WORDS 24
#define PERF_VERSION 2 /* derived from hpux's PI v2 interface */
/* definition of RDR regs */
struct rdr_tbl_ent {
uint16_t width;
uint8_t num_words;
uint8_t write_control;
};
static int perf_processor_interface = UNKNOWN_INTF;
static int perf_enabled = 0;
static spinlock_t perf_lock;
struct parisc_device *cpu_device = NULL;
/* RDRs to write for PCX-W */
static int perf_rdrs_W[] =
{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
/* RDRs to write for PCX-U */
static int perf_rdrs_U[] =
{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
/* RDR register descriptions for PCX-W */
static struct rdr_tbl_ent perf_rdr_tbl_W[] = {
{ 19, 1, 8 }, /* RDR 0 */
{ 16, 1, 16 }, /* RDR 1 */
{ 72, 2, 0 }, /* RDR 2 */
{ 81, 2, 0 }, /* RDR 3 */
{ 328, 6, 0 }, /* RDR 4 */
{ 160, 3, 0 }, /* RDR 5 */
{ 336, 6, 0 }, /* RDR 6 */
{ 164, 3, 0 }, /* RDR 7 */
{ 0, 0, 0 }, /* RDR 8 */
{ 35, 1, 0 }, /* RDR 9 */
{ 6, 1, 0 }, /* RDR 10 */
{ 18, 1, 0 }, /* RDR 11 */
{ 13, 1, 0 }, /* RDR 12 */
{ 8, 1, 0 }, /* RDR 13 */
{ 8, 1, 0 }, /* RDR 14 */
{ 8, 1, 0 }, /* RDR 15 */
{ 1530, 24, 0 }, /* RDR 16 */
{ 16, 1, 0 }, /* RDR 17 */
{ 4, 1, 0 }, /* RDR 18 */
{ 0, 0, 0 }, /* RDR 19 */
{ 152, 3, 24 }, /* RDR 20 */
{ 152, 3, 24 }, /* RDR 21 */
{ 233, 4, 48 }, /* RDR 22 */
{ 233, 4, 48 }, /* RDR 23 */
{ 71, 2, 0 }, /* RDR 24 */
{ 71, 2, 0 }, /* RDR 25 */
{ 11, 1, 0 }, /* RDR 26 */
{ 18, 1, 0 }, /* RDR 27 */
{ 128, 2, 0 }, /* RDR 28 */
{ 0, 0, 0 }, /* RDR 29 */
{ 16, 1, 0 }, /* RDR 30 */
{ 16, 1, 0 }, /* RDR 31 */
};
/* RDR register descriptions for PCX-U */
static struct rdr_tbl_ent perf_rdr_tbl_U[] = {
{ 19, 1, 8 }, /* RDR 0 */
{ 32, 1, 16 }, /* RDR 1 */
{ 20, 1, 0 }, /* RDR 2 */
{ 0, 0, 0 }, /* RDR 3 */
{ 344, 6, 0 }, /* RDR 4 */
{ 176, 3, 0 }, /* RDR 5 */
{ 336, 6, 0 }, /* RDR 6 */
{ 0, 0, 0 }, /* RDR 7 */
{ 0, 0, 0 }, /* RDR 8 */
{ 0, 0, 0 }, /* RDR 9 */
{ 28, 1, 0 }, /* RDR 10 */
{ 33, 1, 0 }, /* RDR 11 */
{ 0, 0, 0 }, /* RDR 12 */
{ 230, 4, 0 }, /* RDR 13 */
{ 32, 1, 0 }, /* RDR 14 */
{ 128, 2, 0 }, /* RDR 15 */
{ 1494, 24, 0 }, /* RDR 16 */
{ 18, 1, 0 }, /* RDR 17 */
{ 4, 1, 0 }, /* RDR 18 */
{ 0, 0, 0 }, /* RDR 19 */
{ 158, 3, 24 }, /* RDR 20 */
{ 158, 3, 24 }, /* RDR 21 */
{ 194, 4, 48 }, /* RDR 22 */
{ 194, 4, 48 }, /* RDR 23 */
{ 71, 2, 0 }, /* RDR 24 */
{ 71, 2, 0 }, /* RDR 25 */
{ 28, 1, 0 }, /* RDR 26 */
{ 33, 1, 0 }, /* RDR 27 */
{ 88, 2, 0 }, /* RDR 28 */
{ 32, 1, 0 }, /* RDR 29 */
{ 24, 1, 0 }, /* RDR 30 */
{ 16, 1, 0 }, /* RDR 31 */
};
/*
* A non-zero write_control in the above tables is a byte offset into
* this array.
*/
static uint64_t perf_bitmasks[] = {
0x0000000000000000ul, /* first dbl word must be zero */
0xfdffe00000000000ul, /* RDR0 bitmask */
0x003f000000000000ul, /* RDR1 bitmask */
0x00fffffffffffffful, /* RDR20-RDR21 bitmask (152 bits) */
0xfffffffffffffffful,
0xfffffffc00000000ul,
0xfffffffffffffffful, /* RDR22-RDR23 bitmask (233 bits) */
0xfffffffffffffffful,
0xfffffffffffffffcul,
0xff00000000000000ul
};
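/*
* Worked example (derived from perf_write_image() below): the driver
* indexes this array with write_control >> 3, so RDR 0's write_control
* of 8 selects perf_bitmasks[1], its bitmask, while RDR 20/21's
* write_control of 24 selects index 3, the start of the dwords
* covering their 152 writable bits.
*/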
/*
* Write control bitmasks for the PA-8700 (Piranha) processor, where
* some things have changed slightly.
*/
static uint64_t perf_bitmasks_piranha[] = {
0x0000000000000000ul, /* first dbl word must be zero */
0xfdffe00000000000ul, /* RDR0 bitmask */
0x003f000000000000ul, /* RDR1 bitmask */
0x00fffffffffffffful, /* RDR20-RDR21 bitmask (158 bits) */
0xfffffffffffffffful,
0xfffffffc00000000ul,
0xfffffffffffffffful, /* RDR22-RDR23 bitmask (210 bits) */
0xfffffffffffffffful,
0xfffffffffffffffful,
0xfffc000000000000ul
};
static uint64_t *bitmask_array; /* array of bitmasks to use */
/******************************************************************************
* Function Prototypes
*****************************************************************************/
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos);
static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);
/* External Assembly Routines */
extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters (void);
extern void perf_intrigue_disable_perf_counters (void);
/******************************************************************************
* Function Definitions
*****************************************************************************/
/*
* configure:
*
* Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
{
long error;
uint32_t raddr[4];
/* Stop the counters*/
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
return -EINVAL;
}
printk("Preparing to write image\n");
/* Write the image to the chip */
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
return -EINVAL;
}
printk("Preparing to start counters\n");
/* Start the counters */
perf_start_counters();
return sizeof(uint32_t);
}
/*
* Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
static int perf_open(struct inode *inode, struct file *file)
{
spin_lock(&perf_lock);
if (perf_enabled) {
spin_unlock(&perf_lock);
return -EBUSY;
}
perf_enabled = 1;
spin_unlock(&perf_lock);
return 0;
}
/*
* Close the device.
*/
static int perf_release(struct inode *inode, struct file *file)
{
spin_lock(&perf_lock);
perf_enabled = 0;
spin_unlock(&perf_lock);
return 0;
}
/*
* Read does nothing for this driver
*/
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
return 0;
}
/*
* write:
*
* This routine downloads the image to the chip. It must be
* called on the processor that the download should happen
* on.
*/
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
size_t image_size;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (count != sizeof(uint32_t))
return -EIO;
if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
return -EFAULT;
/* Get the interface type and test type */
interface_type = (image_type >> 16) & 0xffff;
test = (image_type & 0xffff);
/* Make sure everything makes sense */
/* First check the machine type is correct for
the requested image */
if (((perf_processor_interface == CUDA_INTF) &&
(interface_type != CUDA_INTF)) ||
((perf_processor_interface == ONYX_INTF) &&
(interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
((interface_type == ONYX_INTF) &&
(test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
return count;
}
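/*
* A minimal userspace sketch (hypothetical, not part of this driver):
* the single 32-bit word written selects the image, interface type in
* the upper half and image index in the lower half. Loading, say,
* CUDA image 3 would look like:
*
*	uint32_t image_type = (CUDA_INTF << 16) | 3;
*	write(fd, &image_type, sizeof(image_type));	(returns 4 on success)
*/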
/*
* Patch the images that need to know the IVA addresses.
*/
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
extern void $i_itlb_miss_2_0();
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
/*
* We can only use the lower 32-bits, the upper 32-bits should be 0
* anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
uint32_t IVAaddress = (uint32_t)&PA2_0_iva;
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[BIG_CPI][17] = itlb_addr;
onyx_images[PANIC][15] &= 0xffffff00; /* clear last 2 bytes */
onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24)); /* set 2 bytes */
onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
/* Unknown type */
}
#endif
}
/*
* ioctl routine
* All routines effect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
long error_start;
uint32_t raddr[4];
switch (cmd) {
case PA_PERF_ON:
/* Start the counters */
perf_start_counters();
return 0;
case PA_PERF_OFF:
error_start = perf_stop_counters(raddr);
if (error_start != 0) {
printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
return -EFAULT;
}
/* copy out the Counters */
if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
return -EFAULT;
}
return 0;
case PA_PERF_VERSION:
/* Return the version # */
return put_user(PERF_VERSION, (int *)arg);
default:
break;
}
return -ENOTTY;
}
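/*
* Hedged userspace sketch of the ioctl interface (command names from
* asm/perf.h; fd is assumed to be the open misc device node):
*
*	uint32_t raddr[4];
*	ioctl(fd, PA_PERF_ON);		(start counting)
*	... run the workload ...
*	ioctl(fd, PA_PERF_OFF, raddr);	(stop; raddr[0..3] = 32-bit counters)
*/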
static struct file_operations perf_fops = {
.llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.ioctl = perf_ioctl,
.open = perf_open,
.release = perf_release
};
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
&perf_fops
};
/*
* Initialize the module
*/
static int __init perf_init(void)
{
int ret;
/* Determine correct processor interface to use */
bitmask_array = perf_bitmasks;
if (boot_cpu_data.cpu_type == pcxu ||
boot_cpu_data.cpu_type == pcxu_) {
perf_processor_interface = ONYX_INTF;
} else if (boot_cpu_data.cpu_type == pcxw ||
boot_cpu_data.cpu_type == pcxw_ ||
boot_cpu_data.cpu_type == pcxw2 ||
boot_cpu_data.cpu_type == mako) {
perf_processor_interface = CUDA_INTF;
if (boot_cpu_data.cpu_type == pcxw2 ||
boot_cpu_data.cpu_type == mako)
bitmask_array = perf_bitmasks_piranha;
} else {
perf_processor_interface = UNKNOWN_INTF;
printk("Performance monitoring counters not supported on this processor\n");
return -ENODEV;
}
ret = misc_register(&perf_dev);
if (ret) {
printk(KERN_ERR "Performance monitoring counters: "
"cannot register misc device.\n");
return ret;
}
/* Patch the images to match the system */
perf_patch_images();
spin_lock_init(&perf_lock);
/* TODO: this only lets us access the first cpu.. what to do for SMP? */
cpu_device = cpu_data[0].dev;
printk("Performance monitoring counters enabled for %s\n",
cpu_data[0].dev->name);
return 0;
}
/*
* perf_start_counters(void)
*
* Start the counters.
*/
static void perf_start_counters(void)
{
/* Enable performance monitor counters */
perf_intrigue_enable_perf_counters();
}
/*
* perf_stop_counters
*
* Stop the performance counters and save counts
* in a per_processor array.
*/
static int perf_stop_counters(uint32_t *raddr)
{
uint64_t userbuf[MAX_RDR_WORDS];
/* Disable performance counters */
perf_intrigue_disable_perf_counters();
if (perf_processor_interface == ONYX_INTF) {
uint64_t tmp64;
/*
* Read the counters
*/
if (!perf_rdr_read_ubuf(16, userbuf))
return -13;
/* Counter0 is bits 1398 thru 1429 */
tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
/* OR sticky0 (bit 1430) to counter0 bit 32 */
tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
raddr[0] = (uint32_t)tmp64;
/* Counter1 is bits 1431 thru 1462 */
tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
/* OR sticky1 (bit 1463) to counter1 bit 32 */
tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
raddr[1] = (uint32_t)tmp64;
/* Counter2 is bits 1464 thru 1495 */
tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
/* Counter3 is bits 1497 thru 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
raddr[3] = (uint32_t)tmp64;
/*
* Zero out the counters
*/
/*
* The counters and sticky-bits comprise the last 132 bits
* (1398 - 1529) of RDR16 on a U chip. We'll zero these
* out the easy way: zero out last 10 bits of dword 21,
* all of dword 22 and 58 bits (plus 6 don't care bits) of
* dword 23.
*/
userbuf[21] &= 0xfffffffffffffc00ul; /* 0 to last 10 bits */
userbuf[22] = 0;
userbuf[23] = 0;
/*
* Write back the zeroed words together with the rest of the image,
* since the read was destructive.
*/
perf_rdr_write(16, userbuf);
} else {
/*
* Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
/*
* Clear out the counters
*/
perf_rdr_clear(15);
/*
* Copy the counters
*/
raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
return 0;
}
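/*
* Worked example of the ONYX extraction above (a sketch): counter0
* occupies bits 1398-1429 of the 1530-bit RDR 16 image. Its top ten
* bits sit at the low end of userbuf[21] (hence the << 22 into bits
* 31..22) and the remaining 22 bits at the high end of userbuf[22]
* (hence the >> 42), so the two shifted halves are simply OR'd
* together before truncating to 32 bits.
*/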
/*
* perf_rdr_get_entry
*
* Retrieve a pointer to the description of what this
* RDR contains.
*/
static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
{
if (perf_processor_interface == ONYX_INTF) {
return &perf_rdr_tbl_U[rdr_num];
} else {
return &perf_rdr_tbl_W[rdr_num];
}
}
/*
* perf_rdr_read_ubuf
*
* Read the RDR value into the buffer specified.
*/
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
{
uint64_t data, data_mask = 0;
uint32_t width, xbits, i;
struct rdr_tbl_ent *tentry;
tentry = perf_rdr_get_entry(rdr_num);
if ((width = tentry->width) == 0)
return 0;
/* Clear out buffer */
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
}
/* Check whether the width is an even multiple of 64 bits */
if ((xbits = width & 0x03f) != 0) {
data_mask = 1;
data_mask <<= (64 - xbits);
data_mask--;
}
/* Grab all of the data */
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
data = perf_rdr_shift_in_U(rdr_num, width);
} else {
data = perf_rdr_shift_in_W(rdr_num, width);
}
if (xbits) {
buffer[i] |= (data << (64 - xbits));
if (i) {
buffer[i-1] |= ((data >> xbits) & data_mask);
}
} else {
buffer[i] = data;
}
}
return 1;
}
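/*
* Worked example (sketch): RDR 0 on a PCX-W is 19 bits wide in a
* single word, so xbits = 19 and data_mask covers the low 45 bits.
* The one shifted-in value lands in buffer[0] as data << 45, i.e.
* left-justified in the dword; for multi-word RDRs the loop also
* carries data >> xbits into the preceding word.
*/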
/*
* perf_rdr_clear
*
* Zero out the given RDR register
*/
static int perf_rdr_clear(uint32_t rdr_num)
{
struct rdr_tbl_ent *tentry;
int32_t i;
tentry = perf_rdr_get_entry(rdr_num);
if (tentry->width == 0) {
return -1;
}
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_shift_out_U(rdr_num, 0UL);
} else {
perf_rdr_shift_out_W(rdr_num, 0UL);
}
}
return 0;
}
/*
* perf_write_image
*
* Write the given image out to the processor
*/
static int perf_write_image(uint64_t *memaddr)
{
uint64_t buffer[MAX_RDR_WORDS];
uint64_t *bptr;
uint32_t dwords;
uint32_t *intrigue_rdr;
uint64_t *intrigue_bitmask, tmp64, proc_hpa;
struct rdr_tbl_ent *tentry;
int i;
/* Clear out counters */
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_clear(16);
/* Toggle performance monitor */
perf_intrigue_enable_perf_counters();
perf_intrigue_disable_perf_counters();
intrigue_rdr = perf_rdrs_U;
} else {
perf_rdr_clear(15);
intrigue_rdr = perf_rdrs_W;
}
/* Write all RDRs */
while (*intrigue_rdr != -1) {
tentry = perf_rdr_get_entry(*intrigue_rdr);
perf_rdr_read_ubuf(*intrigue_rdr, buffer);
bptr = &buffer[0];
dwords = tentry->num_words;
if (tentry->write_control) {
intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
while (dwords--) {
tmp64 = *intrigue_bitmask & *memaddr++;
tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
*bptr++ = tmp64;
}
} else {
while (dwords--) {
*bptr++ = *memaddr++;
}
}
perf_rdr_write(*intrigue_rdr, buffer);
intrigue_rdr++;
}
/*
* Now copy out the Runway stuff which is not in RDRs
*/
if (cpu_device == NULL)
{
printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
return -1;
}
proc_hpa = cpu_device->hpa;
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(proc_hpa + RUNWAY_STATUS) & 0xffecfffffffffffful;
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), proc_hpa + RUNWAY_STATUS);
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, proc_hpa + RUNWAY_DEBUG + i);
}
return 0;
}
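/*
* Note on the Runway merge above (a sketch of the arithmetic): the two
* constants are exact complements in the high word
* (0xffec | 0x0013 == 0xffff), so only the intrigue bits of STATUS 0
* are replaced and every other bit keeps its current hardware value.
*/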
/*
* perf_rdr_write
*
* Write the given RDR register with the contents
* of the given buffer.
*/
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
{
struct rdr_tbl_ent *tentry;
int32_t i;
printk("perf_rdr_write\n");
tentry = perf_rdr_get_entry(rdr_num);
if (tentry->width == 0) { return; }
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
}
}
printk("perf_rdr_write done\n");
}
module_init(perf_init);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,396 @@
/*
* PARISC Architecture-dependent parts of process handling
* based on the work for i386
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2002 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <asm/io.h>
#include <asm/offsets.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
static int hlt_counter;
/*
* Power off function, if any
*/
void (*pm_power_off)(void);
void disable_hlt(void)
{
hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);
void enable_hlt(void)
{
hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
void default_idle(void)
{
barrier();
}
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
barrier();
schedule();
check_pgt_cache();
}
}
#ifdef __LP64__
#define COMMAND_GLOBAL 0xfffffffffffe0030UL
#else
#define COMMAND_GLOBAL 0xfffe0030
#endif
#define CMD_RESET 5 /* reset any module */
/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) that may cause a broadcast reset to lock up
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
/*
** If user has modified the Firmware Selftest Bitmap,
** run the tests specified in the bitmap after the
** system is rebooted w/PDC_DO_RESET.
**
** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
**
** Using "directed resets" at each processor with the MEM_TOC
** vector cleared will also avoid running destructive
** memory self tests. (Not implemented yet)
*/
if (ftc_bitmap) {
pdc_do_firm_test_reset(ftc_bitmap);
}
#endif
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* "Normal" system reset */
pdc_do_reset();
/* Nope...box should reset with just CMD_RESET now */
gsc_writel(CMD_RESET, COMMAND_GLOBAL);
/* Wait for RESET to lay us to rest. */
while (1) ;
}
EXPORT_SYMBOL(machine_restart);
void machine_halt(void)
{
/*
** The LED/ChassisCodes are updated by the led_halt()
** function, called by the reboot notifier chain.
*/
}
EXPORT_SYMBOL(machine_halt);
/*
* This routine is called from sys_reboot to actually turn off the
* machine
*/
void machine_power_off(void)
{
/* If there is a registered power off handler, call it. */
if(pm_power_off)
pm_power_off();
/* Put the soft power button back under hardware control.
* If the user had already pressed the power button, the
* following call will immediately power off. */
pdc_soft_power_button(0);
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
printk(KERN_EMERG "System shut down completed.\n"
KERN_EMERG "Please power this system off now.");
}
EXPORT_SYMBOL(machine_power_off);
/*
* Create a kernel thread
*/
extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
/*
* FIXME: Once we are sure we don't need any debug here,
* kernel_thread can become a #define.
*/
return __kernel_thread(fn, arg, flags);
}
EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
}
void flush_thread(void)
{
/* Only needs to handle fpu stuff or perf monitors.
** REVISIT: several arches implement a "lazy fpu state".
*/
set_fs(USER_DS);
}
void release_thread(struct task_struct *dead_task)
{
}
/*
* Fill in the FPU structure for a core dump.
*/
int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
if (regs == NULL)
return 0;
memcpy(r, regs->fr, sizeof *r);
return 1;
}
int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
memcpy(r, tsk->thread.regs.fr, sizeof(*r));
return 1;
}
/* Note that "fork()" is implemented in terms of clone, with
parameters (SIGCHLD, regs->gr[30], regs). */
int
sys_clone(unsigned long clone_flags, unsigned long usp,
struct pt_regs *regs)
{
int __user *user_tid = (int __user *)regs->gr[26];
/* usp must be word aligned. This also prevents users from
* passing in the value 1 (which is the signal for a special
* return for a kernel thread) */
usp = ALIGN(usp, 4);
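/* For example, ALIGN(1, 4) == 4, so the kernel-thread marker value 1
* (checked in copy_thread below) can never arrive from userspace. */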
/* A zero value for usp means use the current stack */
if(usp == 0)
usp = regs->gr[30];
return do_fork(clone_flags, usp, regs, 0, user_tid, NULL);
}
int
sys_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL);
}
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused, /* in ia64 this is "user_stack_size" */
struct task_struct * p, struct pt_regs * pregs)
{
struct pt_regs * cregs = &(p->thread.regs);
struct thread_info *ti = p->thread_info;
/* We have to use void * instead of a function pointer, because
* function pointers aren't a pointer to the function on 64-bit.
* Make them const so the compiler knows they live in .text */
extern void * const ret_from_kernel_thread;
extern void * const child_return;
#ifdef CONFIG_HPUX
extern void * const hpux_child_return;
#endif
*cregs = *pregs;
/* Set the return value for the child. Note that this is not
actually restored by the syscall exit path, but we put it
here for consistency in case of signals. */
cregs->gr[28] = 0; /* child */
/*
* We need to differentiate between a user fork and a
* kernel fork. We can't use user_mode, because the
* syscall path doesn't save iaoq. Right now we rely
* on the fact that kernel_thread passes in zero for usp.
*/
if (usp == 1) {
/* kernel thread */
cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN);
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
cregs->kpc = (unsigned long) &ret_from_kernel_thread;
/*
* Copy function and argument to be called from
* ret_from_kernel_thread.
*/
#ifdef __LP64__
cregs->gr[27] = pregs->gr[27];
#endif
cregs->gr[26] = pregs->gr[26];
cregs->gr[25] = pregs->gr[25];
} else {
/* user thread */
/*
* Note that the fork wrappers are responsible
* for setting gr[21].
*/
/* Use same stack depth as parent */
cregs->ksp = ((unsigned long)(ti))
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
if (p->personality == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
BUG();
#endif
} else {
cregs->kpc = (unsigned long) &child_return;
}
}
return 0;
}
unsigned long thread_saved_pc(struct task_struct *t)
{
return t->thread.regs.kpc;
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(struct pt_regs *regs)
{
int error;
char *filename;
filename = getname((const char __user *) regs->gr[26]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, (char __user **) regs->gr[25],
(char __user **) regs->gr[24], regs);
if (error == 0) {
task_lock(current);
current->ptrace &= ~PT_DTRACE;
task_unlock(current);
}
putname(filename);
out:
return error;
}
unsigned long
get_wchan(struct task_struct *p)
{
struct unwind_frame_info info;
unsigned long ip;
int count = 0;
/*
* These bracket the sleeping functions..
*/
unwind_frame_init_from_blocked_task(&info, p);
do {
if (unwind_once(&info) < 0)
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
return 0;
}

View File

@@ -0,0 +1,400 @@
/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $
*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
* Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
*
* Initial PA-RISC Version: 04-23-1999 by Helge Deller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
struct system_cpuinfo_parisc boot_cpu_data;
EXPORT_SYMBOL(boot_cpu_data);
struct cpuinfo_parisc cpu_data[NR_CPUS];
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
** Consolidate per CPU initialization into (mostly) one module.
** Monarch CPU will initialize boot_cpu_data which shouldn't
** change once the system has booted.
**
** The callback *should* do per-instance initialization of
** everything including the monarch. "Per CPU" init code in
** setup.c:start_parisc() has migrated here and start_parisc()
** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
**
** The goal of consolidating CPU initialization into one place is
** to make sure all CPUs get initialized the same way.
** The code path not shared is how PDC hands control of the CPU to the OS.
** The initialization of OS data structures is the same (done below).
*/
/**
* processor_probe - Determine if processor driver should claim this device.
* @dev: The device which has been found.
*
* Determine if processor driver should claim this chip (return 0) or not
* (return 1). If so, initialize the chip and tell other partners in crime
* they have work to do.
*/
static int __init processor_probe(struct parisc_device *dev)
{
unsigned long txn_addr;
unsigned long cpuid;
struct cpuinfo_parisc *p;
#ifndef CONFIG_SMP
if (boot_cpu_data.cpu_count > 0) {
printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
return 1;
}
#endif
/* Set the logical CPU ID and update the global counter.
* May get overwritten by PAT code.
*/
cpuid = boot_cpu_data.cpu_count;
txn_addr = dev->hpa; /* for legacy PDC */
#ifdef __LP64__
if (is_pdc_pat()) {
ulong status;
unsigned long bytecnt;
pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
struct pdc_pat_cpu_num cpu_info;
#endif
status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
dev->mod_index, PA_VIEW, &pa_pdc_cell);
BUG_ON(PDC_OK != status);
/* verify it's the same as what do_pat_inventory() found */
BUG_ON(dev->mod_info != pa_pdc_cell.mod_info);
BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location);
txn_addr = pa_pdc_cell.mod[0]; /* id_eid for IO sapic */
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
* of cpuid is for physical CPUs and we just don't care yet.
* We'll care when we need to query PAT PDC about a CPU *after*
* boot time (ie shutdown a CPU from an OS perspective).
*/
/* get the cpu number */
status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa);
BUG_ON(PDC_OK != status);
if (cpu_info.cpu_num >= NR_CPUS) {
printk(KERN_WARNING "IGNORING CPU at 0x%x,"
" cpu_slot_id > NR_CPUS"
" (%ld > %d)\n",
dev->hpa, cpu_info.cpu_num, NR_CPUS);
/* Ignore CPU since it will only crash */
boot_cpu_data.cpu_count--;
return 1;
} else {
cpuid = cpu_info.cpu_num;
}
#endif
}
#endif
p = &cpu_data[cpuid];
boot_cpu_data.cpu_count++;
/* initialize counters */
memset(p, 0, sizeof(struct cpuinfo_parisc));
p->loops_per_jiffy = loops_per_jiffy;
p->dev = dev; /* Save IODC data in case we need it */
p->hpa = dev->hpa; /* save CPU hpa */
p->cpuid = cpuid; /* save CPU id */
p->txn_addr = txn_addr; /* save CPU IRQ address */
#ifdef CONFIG_SMP
spin_lock_init(&p->lock);
/*
** FIXME: review if any other initialization is clobbered
** for boot_cpu by the above memset().
*/
/* stolen from init_percpu_prof() */
cpu_data[cpuid].prof_counter = 1;
cpu_data[cpuid].prof_multiplier = 1;
#endif
/*
** CONFIG_SMP: init_smp_config() will attempt to get CPU's into
** OS control. RENDEZVOUS is the default state - see the memset() above.
** p->state = STATE_RENDEZVOUS;
*/
#if 0
/* CPU 0 IRQ table is statically allocated/initialized */
if (cpuid) {
struct irqaction actions[];
/*
** itimer and ipi IRQ handlers are statically initialized in
** arch/parisc/kernel/irq.c. ie Don't need to register them.
*/
actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
if (!actions) {
/* not getting its own table, share with monarch */
actions = cpu_irq_actions[0];
}
cpu_irq_actions[cpuid] = actions;
}
#endif
/*
* Bring this CPU up now! (ignore bootstrap cpuid == 0)
*/
#ifdef CONFIG_SMP
if (cpuid) {
cpu_set(cpuid, cpu_present_map);
cpu_up(cpuid);
}
#endif
return 0;
}
/**
* collect_boot_cpu_data - Fill the boot_cpu_data structure.
*
* This function collects and stores the generic processor information
* in the boot_cpu_data structure.
*/
void __init collect_boot_cpu_data(void)
{
memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK)
printk(KERN_INFO
"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK)
printk(KERN_INFO "vers %08lx\n",
boot_cpu_data.pdc.versions);
if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK)
printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
(boot_cpu_data.pdc.cpuid >> 5) & 127,
boot_cpu_data.pdc.cpuid & 31,
boot_cpu_data.pdc.cpuid);
if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
printk(KERN_INFO "capabilities 0x%lx\n",
boot_cpu_data.pdc.capabilities);
if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
printk(KERN_INFO "model %s\n",
boot_cpu_data.pdc.sys_model_name);
boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;
boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
}
/**
* init_cpu_profiler - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
*/
static inline void __init
init_percpu_prof(int cpunum)
{
cpu_data[cpunum].prof_counter = 1;
cpu_data[cpunum].prof_multiplier = 1;
}
/**
* init_per_cpu - Handle individual processor initializations.
* @cpunum: logical processor number.
*
* This function handles initialization for *every* CPU
* in the system:
*
* o Set "default" CPU width for trap handlers
*
* o Enable FP coprocessor
* REVISIT: this could be done in the "code 22" trap handler.
* (frowand's idea - that way we know which processes need FP
* registers saved on the interrupt stack.)
* NEWS FLASH: wide kernels need FP coprocessor enabled to handle
* formatted printing of %lx for example (double divides I think)
*
* o Enable CPU profiling hooks.
*/
int __init init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
set_firmware_width();
ret = pdc_coproc_cfg(&coproc_cfg);
if(ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
/* FWIW, FP rev/model is a more accurate way to determine
** CPU type. CPU rev/model has some ambiguous cases.
*/
cpu_data[cpunum].fp_rev = coproc_cfg.revision;
cpu_data[cpunum].fp_model = coproc_cfg.model;
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
/*
** store status register to stack (hopefully aligned)
** and clear the T-bit.
*/
asm volatile ("fstd %fr0,8(%sp)");
} else {
printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef __LP64__
"Halting Machine - FP required\n"
#endif
, coproc_cfg.ccr_functional);
#ifdef __LP64__
mdelay(100); /* previous chars get pushed to console */
panic("FP CoProc not reported");
#endif
}
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
return ret;
}
/*
* Display cpu info for all cpu's.
*/
int
show_cpuinfo (struct seq_file *m, void *v)
{
int n;
for(n=0; n<boot_cpu_data.cpu_count; n++) {
#ifdef CONFIG_SMP
if (0 == cpu_data[n].hpa)
continue;
#ifdef ENTRY_SYS_CPUS
#error iCOD support wants to show CPU state here
#endif
#endif
seq_printf(m, "processor\t: %d\n"
"cpu family\t: PA-RISC %s\n",
n, boot_cpu_data.family_name);
seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
/* cpu MHz */
seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
boot_cpu_data.pdc.sys_model_name,
cpu_data[n].dev ?
cpu_data[n].dev->name : "Unknown" );
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
boot_cpu_data.hversion,
boot_cpu_data.sversion );
/* print cachesize info */
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
cpu_data[n].loops_per_jiffy / (500000 / HZ),
(cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
}
return 0;
}
static struct parisc_device_id processor_tbl[] = {
{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
{ 0, }
};
static struct parisc_driver cpu_driver = {
.name = "CPU",
.id_table = processor_tbl,
.probe = processor_probe
};
/**
* processor_init - Processor initialization procedure.
*
* Register this driver.
*/
void __init processor_init(void)
{
register_parisc_driver(&cpu_driver);
}

423
arch/parisc/kernel/ptrace.c Normal file
View File

@@ -0,0 +1,423 @@
/*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
* Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
* Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/offsets.h>
/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB)
#undef DEBUG_PTRACE
#ifdef DEBUG_PTRACE
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
#ifdef __LP64__
/* This function is needed to translate 32 bit pt_regs offsets in to
* 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
* will request offset 12 if it wants gr3, but the lower 32 bits of
* the 64 bit kernels view of gr3 will be at offset 28 (3*8 + 4).
* This code relies on a 32 bit pt_regs being comprised of 32 bit values
* except for the fp registers which (a) are 64 bits, and (b) follow
* the gr registers at the start of pt_regs. The 32 bit pt_regs should
* be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
* being 64 bit in both cases.
*/
static long translate_usr_offset(long offset)
{
if (offset < 0)
return -1;
else if (offset <= 32*4) /* gr[0..31] */
return offset * 2 + 4;
else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
return offset + 32*4;
else if (offset < sizeof(struct pt_regs)/2 + 32*4)
return offset * 2 + 4 - 32*8;
else
return -1;
}
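/*
* Worked examples (sketch): gr3 is requested as offset 3*4 = 12 and
* maps to 12*2 + 4 = 28, the low word of the 64-bit gr[3]; fr1, at
* 32-bit offset 32*4 + 8 = 136, maps to 136 + 32*4 = 264, which is
* 32*8 + 8 into the 64-bit pt_regs.
*/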
#endif
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* make sure the trap bits are not set */
pa_psw(child)->r = 0;
pa_psw(child)->t = 0;
pa_psw(child)->h = 0;
pa_psw(child)->l = 0;
}
long sys_ptrace(long request, pid_t pid, long addr, long data)
{
struct task_struct *child;
long ret;
#ifdef DEBUG_PTRACE
long oaddr=addr, odata=data;
#endif
lock_kernel();
ret = -EPERM;
if (request == PTRACE_TRACEME) {
/* are we already being traced? */
if (current->ptrace & PT_PTRACED)
goto out;
ret = security_ptrace(current->parent, current);
if (ret)
goto out;
/* set the ptrace bit in the process flags. */
current->ptrace |= PT_PTRACED;
ret = 0;
goto out;
}
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* no messing around with init! */
goto out_tsk;
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
goto out_tsk;
}
ret = ptrace_check_attach(child, request == PTRACE_KILL);
if (ret < 0)
goto out_tsk;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
int copied;
#ifdef __LP64__
if (is_compat_task(child)) {
unsigned int tmp;
addr &= 0xffffffffL;
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
goto out_tsk;
ret = put_user(tmp,(unsigned int *) data);
DBG("sys_ptrace(PEEK%s, %d, %lx, %lx) returning %ld, data %x\n",
request == PTRACE_PEEKTEXT ? "TEXT" : "DATA",
pid, oaddr, odata, ret, tmp);
}
else
#endif
{
unsigned long tmp;
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
goto out_tsk;
ret = put_user(tmp,(unsigned long *) data);
}
goto out_tsk;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = 0;
#ifdef __LP64__
if (is_compat_task(child)) {
unsigned int tmp = (unsigned int)data;
DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n",
request == PTRACE_POKETEXT ? "TEXT" : "DATA",
pid, oaddr, odata);
addr &= 0xffffffffL;
if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) == sizeof(tmp))
goto out_tsk;
}
else
#endif
{
if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
goto out_tsk;
}
ret = -EIO;
goto out_tsk;
/* Read the word at location addr in the USER area. For ptraced
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR: {
ret = -EIO;
#ifdef __LP64__
if (is_compat_task(child)) {
unsigned int tmp;
if (addr & (sizeof(int)-1))
goto out_tsk;
if ((addr = translate_usr_offset(addr)) < 0)
goto out_tsk;
tmp = *(unsigned int *) ((char *) task_regs(child) + addr);
ret = put_user(tmp, (unsigned int *) data);
DBG("sys_ptrace(PEEKUSR, %d, %lx, %lx) returning %ld, addr %lx, data %x\n",
pid, oaddr, odata, ret, addr, tmp);
}
else
#endif
{
unsigned long tmp;
if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
goto out_tsk;
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
ret = put_user(tmp, (unsigned long *) data);
}
goto out_tsk;
}
/* Write the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall.
FIXME. There is a problem at the moment in that r3-r18 are only
saved if the process is ptraced on syscall entry, and even then
those values are overwritten by actual register values on syscall
exit. */
case PTRACE_POKEUSR:
ret = -EIO;
/* Some register values written here may be ignored in
* entry.S:syscall_restore_rfi; e.g. iaoq is written with
* r31/r31+4, and not with the values in pt_regs.
*/
if (addr == PT_PSW) {
/* PT_PSW=0, so this is valid for 32 bit processes
* under 64 bit kernels.
*
* Allow writing to Nullify, Divide-step-correction,
* and carry/borrow bits.
* BEWARE, if you set N, and then single step, it won't
* stop on the nullified instruction.
*/
DBG("sys_ptrace(POKEUSR, %d, %lx, %lx)\n",
pid, oaddr, odata);
data &= USER_PSW_BITS;
task_regs(child)->gr[0] &= ~USER_PSW_BITS;
task_regs(child)->gr[0] |= data;
ret = 0;
goto out_tsk;
}
#ifdef __LP64__
if (is_compat_task(child)) {
if (addr & (sizeof(int)-1))
goto out_tsk;
if ((addr = translate_usr_offset(addr)) < 0)
goto out_tsk;
DBG("sys_ptrace(POKEUSR, %d, %lx, %lx) addr %lx\n",
pid, oaddr, odata, addr);
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
*(unsigned int *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
addr == PT_SAR+4) {
/* Zero the top 32 bits */
*(unsigned int *) ((char *) task_regs(child) + addr - 4) = 0;
*(unsigned int *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
goto out_tsk;
}
else
#endif
{
if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
goto out_tsk;
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
addr == PT_SAR) {
*(unsigned long *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
goto out_tsk;
}
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT:
ret = -EIO;
DBG("sys_ptrace(%s)\n",
request == PTRACE_SYSCALL ? "SYSCALL" : "CONT");
if ((unsigned long) data > _NSIG)
goto out_tsk;
child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
if (request == PTRACE_SYSCALL) {
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
} else {
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
child->exit_code = data;
goto out_wake_notrap;
case PTRACE_KILL:
/*
* Make the child exit. The best we can do is send it a
* SIGKILL; perhaps it should be put in the status
* that it wants to exit.
*/
DBG("sys_ptrace(KILL)\n");
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
goto out_tsk;
child->exit_code = SIGKILL;
goto out_wake_notrap;
case PTRACE_SINGLEBLOCK:
DBG("sys_ptrace(SINGLEBLOCK)\n");
ret = -EIO;
if ((unsigned long) data > _NSIG)
goto out_tsk;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->ptrace &= ~PT_SINGLESTEP;
child->ptrace |= PT_BLOCKSTEP;
child->exit_code = data;
/* Enable taken branch trap. */
pa_psw(child)->r = 0;
pa_psw(child)->t = 1;
pa_psw(child)->h = 0;
pa_psw(child)->l = 0;
goto out_wake;
case PTRACE_SINGLESTEP:
DBG("sys_ptrace(SINGLESTEP)\n");
ret = -EIO;
if ((unsigned long) data > _NSIG)
goto out_tsk;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->ptrace &= ~PT_BLOCKSTEP;
child->ptrace |= PT_SINGLESTEP;
child->exit_code = data;
if (pa_psw(child)->n) {
struct siginfo si;
/* Nullified, just crank over the queue. */
task_regs(child)->iaoq[0] = task_regs(child)->iaoq[1];
task_regs(child)->iasq[0] = task_regs(child)->iasq[1];
task_regs(child)->iaoq[1] = task_regs(child)->iaoq[0] + 4;
pa_psw(child)->n = 0;
pa_psw(child)->x = 0;
pa_psw(child)->y = 0;
pa_psw(child)->z = 0;
pa_psw(child)->b = 0;
ptrace_disable(child);
/* Don't wake up the child, but let the
parent know something happened. */
si.si_code = TRAP_TRACE;
si.si_addr = (void __user *) (task_regs(child)->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
si.si_errno = 0;
force_sig_info(SIGTRAP, &si, child);
//notify_parent(child, SIGCHLD);
//ret = 0;
goto out_wake;
}
/* Enable recovery counter traps. The recovery counter
* itself will be set to zero on a task switch. If the
* task is suspended on a syscall then the syscall return
* path will overwrite the recovery counter with a suitable
* value such that it traps once back in user space. We
* disable interrupts in the child's PSW here also, to avoid
* interrupts while the recovery counter is decrementing.
*/
pa_psw(child)->r = 1;
pa_psw(child)->t = 0;
pa_psw(child)->h = 0;
pa_psw(child)->l = 0;
/* give it a chance to run. */
goto out_wake;
case PTRACE_DETACH:
ret = ptrace_detach(child, data);
goto out_tsk;
case PTRACE_GETEVENTMSG:
ret = put_user(child->ptrace_message, (unsigned int __user *) data);
goto out_tsk;
default:
ret = ptrace_request(child, request, addr, data);
goto out_tsk;
}
out_wake_notrap:
ptrace_disable(child);
out_wake:
wake_up_process(child);
ret = 0;
out_tsk:
put_task_struct(child);
out:
unlock_kernel();
DBG("sys_ptrace(%ld, %d, %lx, %lx) returning %ld\n",
request, pid, oaddr, odata, ret);
return ret;
}
void syscall_trace(void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}

304
arch/parisc/kernel/real2.S Normal file
View File

@@ -0,0 +1,304 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
*
*/
#include <asm/assembly.h>
#include <asm/psw.h>
.section .bss
.export real_stack
.export real32_stack
.export real64_stack
.align 64
real_stack:
real32_stack:
real64_stack:
.block 8192
#ifdef __LP64__
# define REG_SZ 8
#else
# define REG_SZ 4
#endif
#define N_SAVED_REGS 9
save_cr_space:
.block REG_SZ * N_SAVED_REGS
save_cr_end:
/************************ 32-bit real-mode calls ***********************/
/* This can be called in both narrow and wide kernels */
.text
.export real32_call_asm
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
* unsigned int iodc_fn)
* sp is value of stack pointer to adopt before calling PDC (virt)
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
real32_call_asm:
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef __LP64__
callee_save
ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
STREG %r27, -1*REG_SZ(%sp)
STREG %r29, -2*REG_SZ(%sp)
#endif
STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
/* save iodc_fn */
copy %arg2, %r31
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldw 0(%arg1), %arg0 /* note overwriting arg0 */
ldw -8(%arg1), %arg2
ldw -12(%arg1), %arg3
ldw -4(%arg1), %arg1 /* obviously must do this one last! */
tophys_r1 %sp
b,l rfi_virt2real,%r2
nop
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
#ifdef __LP64__
rsm PSW_SM_W, %r0 /* go narrow */
#endif
load32 PA(ric_ret), %r2
bv 0(%r31)
nop
ric_ret:
#ifdef __LP64__
ssm PSW_SM_W, %r0 /* go wide */
#endif
/* restore CRs before going virtual in case we page fault */
b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
nop
b,l rfi_real2virt,%r2
nop
tovirt_r1 %sp
LDREG -REG_SZ(%sp), %sp /* restore SP */
#ifdef __LP64__
LDREG -1*REG_SZ(%sp), %r27
LDREG -2*REG_SZ(%sp), %r29
ldo -2*REG_SZ(%sp), %sp
callee_rest
#endif
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
.text
save_control_regs:
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
PUSH_CR(%cr25, %r28)
PUSH_CR(%cr26, %r28)
PUSH_CR(%cr27, %r28)
PUSH_CR(%cr28, %r28)
PUSH_CR(%cr29, %r28)
PUSH_CR(%cr30, %r28)
PUSH_CR(%cr31, %r28)
PUSH_CR(%cr15, %r28)
bv 0(%r2)
nop
restore_control_regs:
load32 PA(save_cr_end), %r26
POP_CR(%cr15, %r26)
POP_CR(%cr31, %r26)
POP_CR(%cr30, %r26)
POP_CR(%cr29, %r26)
POP_CR(%cr28, %r26)
POP_CR(%cr27, %r26)
POP_CR(%cr26, %r26)
POP_CR(%cr25, %r26)
POP_CR(%cr24, %r26)
bv 0(%r2)
nop
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
.align 128
.text
rfi_virt2real:
/* switch to real mode... */
ssm 0,0 /* See "relied upon translation" */
nop /* PA 2.0 Arch. F-5 */
nop
nop
nop
nop
nop
nop
nop
rsm (PSW_SM_Q|PSW_SM_I),%r0 /* disable Q & I bits to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
load32 PA(rfi_v2r_1), %r1
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 REAL_MODE_PSW, %r1
mtctl %r1, %cr22
rfi
nop
nop
nop
nop
nop
nop
nop
nop
rfi_v2r_1:
tophys_r1 %r2
bv 0(%r2)
nop
.text
.align 128
rfi_real2virt:
ssm 0,0 /* See "relied upon translation" */
nop /* PA 2.0 Arch. F-5 */
nop
nop
nop
nop
nop
nop
nop
rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
load32 (rfi_r2v_1), %r1
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 KERNEL_PSW, %r1
mtctl %r1, %cr22
rfi
nop
nop
nop
nop
nop
nop
nop
nop
rfi_r2v_1:
tovirt_r1 %r2
bv 0(%r2)
nop
#ifdef __LP64__
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
.text
.export real64_call_asm
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
* sp is value of stack pointer to adopt before calling PDC (virt)
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
real64_call_asm:
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
/* save fn */
copy %arg2, %r31
/* set up the new ap */
ldo 64(%arg1), %r29
/* load up the arg registers from the saved arg area */
/* 64-bit calling convention passes first 8 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
ldd 2*REG_SZ(%arg1), %arg2
ldd 3*REG_SZ(%arg1), %arg3
ldd 4*REG_SZ(%arg1), %r22
ldd 5*REG_SZ(%arg1), %r21
ldd 6*REG_SZ(%arg1), %r20
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
tophys_r1 %sp
b,l rfi_virt2real,%r2
nop
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
load32 PA(r64_ret), %r2
bv 0(%r31)
nop
r64_ret:
/* restore CRs before going virtual in case we page fault */
b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
nop
b,l rfi_real2virt,%r2
nop
tovirt_r1 %sp
ldd -8(%sp), %sp /* restore SP */
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
#endif
.export pc_in_user_space
.text
/* Doesn't belong here but I couldn't find a nicer spot. */
/* Should never get called, only used by profile stuff in time.c */
pc_in_user_space:
bv,n 0(%rp)
nop
.export __canonicalize_funcptr_for_compare
.text
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
__canonicalize_funcptr_for_compare:
#ifdef __LP64__
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28

View File

@@ -0,0 +1,102 @@
/*
* Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
*/
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
* Semaphores are complex as we wish to avoid using two variables.
* `count' has multiple roles, depending on its value. If it is positive
* or zero, there are no waiters. The functions here will never be
* called; see <asm/semaphore.h>
*
* When count is -1 it indicates there is at least one task waiting
* for the semaphore.
*
* When count is less than that, there are '- count - 1' wakeups
* pending. i.e. if it has value -3, there are 2 wakeups pending.
*
* Note that these functions are only called when there is contention
* on the lock, and as such all this is the "non-critical" part of the
* whole semaphore business. The critical part is the inline stuff in
* <asm/semaphore.h> where we want to avoid any extra jumps and calls.
*/
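/*
* A worked example of the encoding above: once any task sleeps, count
* becomes -1; each __up() then decrements it further, so after two ups
* count == -3 and wakers(-3) == -1 - (-3) == 2 wakeups pending.
*/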
void __up(struct semaphore *sem)
{
sem->count--;
wake_up(&sem->wait);
}
#define wakers(count) (-1 - count)
#define DOWN_HEAD \
int ret = 0; \
DECLARE_WAITQUEUE(wait, current); \
\
/* Note that someone is waiting */ \
if (sem->count == 0) \
sem->count = -1; \
\
/* protected by the sentry still -- use unlocked version */ \
wait.flags = WQ_FLAG_EXCLUSIVE; \
__add_wait_queue_tail(&sem->wait, &wait); \
lost_race: \
spin_unlock_irq(&sem->sentry);
#define DOWN_TAIL \
spin_lock_irq(&sem->sentry); \
if (wakers(sem->count) == 0 && ret == 0) \
goto lost_race; /* Someone stole our wakeup */ \
__remove_wait_queue(&sem->wait, &wait); \
current->state = TASK_RUNNING; \
if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \
sem->count = wakers(sem->count);
#define UPDATE_COUNT \
sem->count += (sem->count < 0) ? 1 : -1;
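/* In effect UPDATE_COUNT either consumes one recorded wakeup (count
* still negative) or, once DOWN_TAIL has folded the pending wakeups
* back into a non-negative count, claims the now-uncontended semaphore
* by decrementing it. */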
void __sched __down(struct semaphore * sem)
{
DOWN_HEAD
for(;;) {
set_task_state(current, TASK_UNINTERRUPTIBLE);
/* we can _read_ this without the sentry */
if (sem->count != -1)
break;
schedule();
}
DOWN_TAIL
UPDATE_COUNT
}
int __sched __down_interruptible(struct semaphore * sem)
{
DOWN_HEAD
for(;;) {
set_task_state(current, TASK_INTERRUPTIBLE);
/* we can _read_ this without the sentry */
if (sem->count != -1)
break;
if (signal_pending(current)) {
ret = -EINTR;
break;
}
schedule();
}
DOWN_TAIL
if (!ret) {
UPDATE_COUNT
}
return ret;
}

368
arch/parisc/kernel/setup.c Normal file
View File

@@ -0,0 +1,368 @@
/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $
*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
* Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
*
* Initial PA-RISC Version: 04-23-1999 by Helge Deller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#define PCI_DEBUG
#include <linux/pci.h>
#undef PCI_DEBUG
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <asm/machdep.h> /* for pa7300lc_init() proto */
#include <asm/pdc_chassis.h>
#include <asm/io.h>
#include <asm/setup.h>
char command_line[COMMAND_LINE_SIZE];
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
struct proc_dir_entry * proc_runway_root = NULL;
struct proc_dir_entry * proc_gsc_root = NULL;
struct proc_dir_entry * proc_mckinley_root = NULL;
#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA))
int parisc_bus_is_phys = 1; /* Assume no IOMMU is present */
EXPORT_SYMBOL(parisc_bus_is_phys);
#endif
/* This sets the vmerge boundary and size, it's here because it has to
* be available on all platforms (zero means no-virtual merging) */
unsigned long parisc_vmerge_boundary = 0;
unsigned long parisc_vmerge_max_size = 0;
void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
/* Collect stuff passed in from the boot loader */
/* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */
if (boot_args[0] < 64) {
/* called from hpux boot loader */
saved_command_line[0] = '\0';
} else {
strcpy(saved_command_line, (char *)__va(boot_args[1]));
#ifdef CONFIG_BLK_DEV_INITRD
if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
{
initrd_start = (unsigned long)__va(boot_args[2]);
initrd_end = (unsigned long)__va(boot_args[3]);
}
#endif
}
strcpy(command_line, saved_command_line);
*cmdline_p = command_line;
}
#ifdef CONFIG_PA11
void __init dma_ops_init(void)
{
switch (boot_cpu_data.cpu_type) {
case pcx:
/*
* We've got way too many dependencies on 1.1 semantics
* to support 1.0 boxes at this point.
*/
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
case pcxs:
case pcxt:
hppa_dma_ops = &pcx_dma_ops;
break;
case pcxl2:
pa7300lc_init();
/* falls through */
case pcxl:
hppa_dma_ops = &pcxl_dma_ops;
break;
default:
break;
}
}
#endif
extern int init_per_cpu(int cpuid);
extern void collect_boot_cpu_data(void);
void __init setup_arch(char **cmdline_p)
{
#ifdef __LP64__
extern int parisc_narrow_firmware;
#endif
init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
#ifdef __LP64__
printk(KERN_INFO "The 64-bit Kernel has started...\n");
#else
printk(KERN_INFO "The 32-bit Kernel has started...\n");
#endif
pdc_console_init();
#ifdef __LP64__
if(parisc_narrow_firmware) {
printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
}
#endif
setup_pdc();
setup_cmdline(cmdline_p);
collect_boot_cpu_data();
do_memory_inventory(); /* probe for physical memory */
parisc_cache_init();
paging_init();
#ifdef CONFIG_CHASSIS_LCD_LED
/* initialize the LCD/LED after boot_cpu_data is available ! */
led_init(); /* LCD/LED initialization */
#endif
#ifdef CONFIG_PA11
dma_ops_init();
#endif
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con; /* we use take_over_console() later ! */
#endif
}
/*
* Display cpu info for all cpu's.
* for parisc this is in processor.c
*/
extern int show_cpuinfo (struct seq_file *m, void *v);
static void *
c_start (struct seq_file *m, loff_t *pos)
{
/* Looks like the caller will call repeatedly until we return
* 0, signaling EOF perhaps. This could be used to sequence
* through CPUs for example. Since we print all cpu info in our
* show_cpuinfo() disregarding 'pos' (which I assume is 'v' above)
* we only allow for one "position". */
return ((long)*pos < 1) ? (void *)1 : NULL;
}
static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void
c_stop (struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo
};
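/* The seq_file core drives these as start -> show -> next -> ... ->
* stop for each read of /proc/cpuinfo; since c_start() only admits
* position 0, show_cpuinfo() runs exactly once and prints every CPU. */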
static void __init parisc_proc_mkdir(void)
{
/*
** Can't call proc_mkdir() until after proc_root_init() has been
** called by start_kernel(). In other words, this code can't
** live in arch/.../setup.c because start_parisc() calls
** start_kernel().
*/
switch (boot_cpu_data.cpu_type) {
case pcxl:
case pcxl2:
if (NULL == proc_gsc_root)
{
proc_gsc_root = proc_mkdir("bus/gsc", NULL);
}
break;
case pcxt_:
case pcxu:
case pcxu_:
case pcxw:
case pcxw_:
case pcxw2:
if (NULL == proc_runway_root)
{
proc_runway_root = proc_mkdir("bus/runway", NULL);
}
break;
case mako:
if (NULL == proc_mckinley_root)
{
proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
}
break;
default:
/* FIXME: this was added to prevent the compiler
* complaining about missing pcx, pcxs and pcxt
* I'm assuming they have neither gsc nor runway */
break;
}
}
static struct resource central_bus = {
.name = "Central Bus",
.start = F_EXTEND(0xfff80000),
.end = F_EXTEND(0xfffaffff),
.flags = IORESOURCE_MEM,
};
static struct resource local_broadcast = {
.name = "Local Broadcast",
.start = F_EXTEND(0xfffb0000),
.end = F_EXTEND(0xfffdffff),
.flags = IORESOURCE_MEM,
};
static struct resource global_broadcast = {
.name = "Global Broadcast",
.start = F_EXTEND(0xfffe0000),
.end = F_EXTEND(0xffffffff),
.flags = IORESOURCE_MEM,
};
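/* F_EXTEND() widens these 32-bit firmware addresses into the fixed
* upper I/O region on a wide kernel (and leaves them effectively
* unchanged on a narrow one), so the same table serves both builds. */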
static int __init parisc_init_resources(void)
{
int result;
result = request_resource(&iomem_resource, &central_bus);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, central_bus.name);
return result;
}
result = request_resource(&iomem_resource, &local_broadcast);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %saddress space!\n",
__FILE__, local_broadcast.name);
return result;
}
result = request_resource(&iomem_resource, &global_broadcast);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, global_broadcast.name);
return result;
}
return 0;
}
extern void gsc_init(void);
extern void processor_init(void);
extern void ccio_init(void);
extern void hppb_init(void);
extern void dino_init(void);
extern void iosapic_init(void);
extern void lba_init(void);
extern void sba_init(void);
extern void eisa_init(void);
static int __init parisc_init(void)
{
parisc_proc_mkdir();
parisc_init_resources();
do_device_inventory(); /* probe for hardware */
parisc_pdc_chassis_init();
/* set up a new led state on systems shipped with an LED state panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
processor_init();
printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
boot_cpu_data.cpu_count,
boot_cpu_data.cpu_name,
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
parisc_setup_cache_timing();
/* These are in a non-obvious order, will fix when we have an iotree */
#if defined(CONFIG_IOSAPIC)
iosapic_init();
#endif
#if defined(CONFIG_IOMMU_SBA)
sba_init();
#endif
#if defined(CONFIG_PCI_LBA)
lba_init();
#endif
/* CCIO before any potential subdevices */
#if defined(CONFIG_IOMMU_CCIO)
ccio_init();
#endif
/*
* Need to register Asp & Wax before the EISA adapters for the IRQ
* regions. EISA must come before PCI to be sure it gets IRQ region
* 0.
*/
#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
gsc_init();
#endif
#ifdef CONFIG_EISA
eisa_init();
#endif
#if defined(CONFIG_HPPB)
hppb_init();
#endif
#if defined(CONFIG_GSC_DINO)
dino_init();
#endif
#ifdef CONFIG_CHASSIS_LCD_LED
register_led_regions(); /* register LED port info in procfs */
#endif
return 0;
}
arch_initcall(parisc_init);

664
arch/parisc/kernel/signal.c Normal file
View File

@@ -0,0 +1,664 @@
/*
* linux/arch/parisc/kernel/signal.c: Architecture-specific signal
* handling support.
*
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
* Copyright (C) 2000 Linuxcare, Inc.
*
* Based on the ia64, i386, and alpha versions.
*
* Like the IA-64, we are a recent enough port (we are *starting*
* with glibc2.2) that we do not need to support the old non-realtime
* Linux signals. Therefore we don't. HP/UX signals will go in
* arch/parisc/hpux/signal.c when we figure out how to do them.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/personality.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/offsets.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#include "signal32.h"
#endif
#define DEBUG_SIG 0
#define DEBUG_SIG_LEVEL 2
#if DEBUG_SIG
#define DBG(LEVEL, ...) \
((DEBUG_SIG_LEVEL >= LEVEL) \
? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* gcc will complain if a pointer is cast to an integer of different
* size. If you really need to do this (and we do for an ELF32 user
* application in an ELF64 kernel) then you have to do a cast to an
* integer of the same size first. The A() macro accomplishes
* this. */
#define A(__x) ((unsigned long)(__x))
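/* For example, A(&compat_frame->info) below yields an unsigned long
* that can be stored in a register or narrowed to a compat_uint_t
* without a direct pointer-to-int cast. */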
int do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall);
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef __LP64__
#include "sys32.h"
#endif
asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
{
sigset_t saveset, newset;
#ifdef __LP64__
compat_sigset_t newset32;
if(personality(current->personality) == PER_LINUX32){
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (copy_from_user(&newset32, (compat_sigset_t __user *)unewset, sizeof(newset32)))
return -EFAULT;
sigset_32to64(&newset,&newset32);
} else
#endif
{
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
}
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->gr[28] = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(&saveset, regs, 1))
return -EINTR;
}
}
/*
* Do a signal return - restore sigcontext.
*/
/* Trampoline for calling rt_sigreturn() */
#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */
#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
#define INSN_NOP 0x08000240 /* nop */
/* For debugging */
#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
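/* setup_rt_frame() below assembles these into the 4-word sigreturn
* trampoline on the user stack:
*	ldi	0/1, %r25		(the in_syscall flag)
*	ldi	__NR_rt_sigreturn, %r20
*	be,l	0x100(%sr2, %r0)	(enter the syscall gateway page)
*	nop
*/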
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
long err = 0;
err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
err |= __get_user(regs->sar, &sc->sc_sar);
DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n",
regs->iaoq[0],regs->iaoq[1]);
DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
}
void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
struct siginfo si;
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef __LP64__
compat_sigset_t compat_set;
struct compat_rt_sigframe __user * compat_frame;
if(personality(current->personality) == PER_LINUX32)
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
/* Unwind the user stack to get the rt_sigframe structure. */
frame = (struct rt_sigframe __user *)
(usp - sigframe_size);
DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
#ifdef __LP64__
compat_frame = (struct compat_rt_sigframe __user *)frame;
if(personality(current->personality) == PER_LINUX32){
DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
goto give_sigsegv;
sigset_32to64(&set,&compat_set);
} else
#endif
{
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto give_sigsegv;
}
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
/* Good thing we saved the old gr[30], eh? */
#ifdef __LP64__
if(personality(current->personality) == PER_LINUX32){
DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
&compat_frame->uc.uc_mcontext);
// FIXME: Load upper half from register file
if (restore_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs))
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &compat_frame->uc.uc_stack);
if (do_sigaltstack32(&compat_frame->uc.uc_stack, NULL, usp) == -EFAULT)
goto give_sigsegv;
} else
#endif
{
DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n",
&frame->uc.uc_mcontext);
if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &frame->uc.uc_stack);
if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
goto give_sigsegv;
}
/* If we are on the syscall path IAOQ will not be restored, and
* if we are on the interrupt path we must not corrupt gr31.
*/
if (in_syscall)
regs->gr[31] = regs->iaoq[0];
#if DEBUG_SIG
DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]);
show_regs(regs);
#endif
return;
give_sigsegv:
DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SI_KERNEL;
si.si_pid = current->pid;
si.si_uid = current->uid;
si.si_addr = &frame->uc;
force_sig_info(SIGSEGV, &si, current);
return;
}
/*
* Set up a signal frame.
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
/*FIXME: ELF32 vs. ELF64 has different frame_size, but since we
don't use the parameter it doesn't matter */
DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
(unsigned long)ka, sp, frame_size);
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
sp = current->sas_ss_sp; /* Stacks grow up! */
DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
return (void __user *) sp; /* Stacks grow up. Fun. */
}
static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
{
unsigned long flags = 0;
long err = 0;
if (on_sig_stack((unsigned long) sc))
flags |= PARISC_SC_FLAG_ONSTACK;
if (in_syscall) {
flags |= PARISC_SC_FLAG_IN_SYSCALL;
/* regs->iaoq is undefined in the syscall return path */
err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n",
regs->gr[31], regs->gr[31]+4);
} else {
err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n",
regs->iaoq[0], regs->iaoq[1]);
}
err |= __put_user(flags, &sc->sc_flags);
err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
err |= __put_user(regs->sar, &sc->sc_sar);
DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
}
static long
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
struct siginfo si;
int err = 0;
#ifdef __LP64__
compat_int_t compat_val;
struct compat_rt_sigframe __user * compat_frame;
compat_sigset_t compat_set;
#endif
usp = (regs->gr[30] & ~(0x01UL));
/*FIXME: frame_size parameter is unused, remove it. */
frame = get_sigframe(ka, usp, sizeof(*frame));
DBG(1,"SETUP_RT_FRAME: START\n");
DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info);
#ifdef __LP64__
compat_frame = (struct compat_rt_sigframe __user *)frame;
if(personality(current->personality) == PER_LINUX32) {
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
err |= compat_copy_siginfo_to_user(&compat_frame->info, info);
DBG(1,"SETUP_RT_FRAME: 1\n");
compat_val = (compat_int_t)current->sas_ss_sp;
err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp);
DBG(1,"SETUP_RT_FRAME: 2\n");
compat_val = (compat_int_t)current->sas_ss_size;
err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_size);
DBG(1,"SETUP_RT_FRAME: 3\n");
compat_val = sas_ss_flags(regs->gr[30]);
err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_flags);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs, in_syscall);
sigset_64to32(&compat_set,set);
err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
} else
#endif
{
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
err |= copy_siginfo_to_user(&frame->info, info);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= __put_user(sas_ss_flags(regs->gr[30]),
&frame->uc.uc_stack.ss_flags);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
/* FIXME: Should probably be converted as well for the compat case */
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
}
if (err)
goto give_sigsegv;
/* Set up to return from userspace. If provided, use a stub
already in userspace. The first words of tramp are used to
save the previous sigrestartblock trampoline that might be
on the stack. We start the sigreturn trampoline at
SIGRESTARTBLOCK_TRAMP+X. */
err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
err |= __put_user(INSN_LDI_R20,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
err |= __put_user(INSN_BLE_SR2_R0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
#if DEBUG_SIG
/* Assert that we're flushing in the correct space... */
{
int sid;
asm ("mfsp %%sr3,%0" : "=r" (sid));
DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
sid, frame->tramp);
}
#endif
flush_user_dcache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[TRAMP_SIZE]);
flush_user_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[TRAMP_SIZE]);
/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
* TRAMP Words 5-8, Length 4 = SIGRETURN_TRAMP
* So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
*/
rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
if (err)
goto give_sigsegv;
haddr = A(ka->sa.sa_handler);
/* The sa_handler may be a pointer to a function descriptor */
#ifdef __LP64__
if(personality(current->personality) == PER_LINUX32) {
#endif
if (haddr & PA_PLABEL_FDESC) {
Elf32_Fdesc fdesc;
Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
goto give_sigsegv;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
}
#ifdef __LP64__
} else {
Elf64_Fdesc fdesc;
Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
goto give_sigsegv;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
haddr, regs->gr[19], in_syscall);
}
#endif
/* The syscall return path will create IAOQ values from r31.
*/
sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef __LP64__
if(personality(current->personality) == PER_LINUX32)
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef __LP64__
if(personality(current->personality) == PER_LINUX)
sigframe_size |= 1;
#endif
} else {
unsigned long psw = USER_PSW;
#ifdef __LP64__
if(personality(current->personality) == PER_LINUX)
psw |= PSW_W;
#endif
/* If we are singlestepping, arrange a trap to be delivered
when we return to userspace. Note the semantics -- we
should trap before the first insn in the handler is
executed. Ref:
http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
*/
if (pa_psw(current)->r) {
pa_psw(current)->r = 0;
psw |= PSW_R;
mtctl(-1, 0);
}
regs->gr[0] = psw;
regs->iaoq[0] = haddr | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
}
regs->gr[2] = rp; /* userland return pointer */
regs->gr[26] = sig; /* signal number */
#ifdef __LP64__
if(personality(current->personality) == PER_LINUX32){
regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
} else
#endif
{
regs->gr[25] = A(&frame->info); /* siginfo pointer */
regs->gr[24] = A(&frame->uc); /* ucontext pointer */
}
DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n",
regs->gr[30], sigframe_size,
regs->gr[30] + sigframe_size);
/* Raise the user stack pointer to make a proper call frame. */
regs->gr[30] = (A(frame) + sigframe_size);
DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
current->comm, current->pid, frame, regs->gr[30],
regs->iaoq[0], regs->iaoq[1], rp);
return 1;
give_sigsegv:
DBG(1,"setup_rt_frame: sending SIGSEGV\n");
if (sig == SIGSEGV)
ka->sa.sa_handler = SIG_DFL;
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SI_KERNEL;
si.si_pid = current->pid;
si.si_uid = current->uid;
si.si_addr = frame;
force_sig_info(SIGSEGV, &si, current);
return 0;
}
/*
* OK, we're invoking a handler.
*/
static long
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs *regs, int in_syscall)
{
DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
sig, ka, info, oldset, regs);
/* Set up the stack frame */
if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
return 0;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
return 1;
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* We need to be able to restore the syscall arguments (r21-r26) to
* restart syscalls. Thus, the syscall path should save them in the
* pt_regs structure (it's okay to do so since they are caller-save
* registers). As noted below, the syscall number gets restored for
* us due to the magic of delayed branching.
*/
asmlinkage int
do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall)
{
siginfo_t info;
struct k_sigaction ka;
int signr;
DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
oldset, regs, regs->sr[7], in_syscall);
/* Everyone else checks to see if they are in kernel mode at
this point and exits if that's the case. I'm not sure why
we would be called in that case, but for some reason we
are. */
if (!oldset)
oldset = &current->blocked;
DBG(1,"do_signal: oldset %08lx / %08lx\n",
oldset->sig[0], oldset->sig[1]);
/* May need to force signal if handle_signal failed to deliver */
while (1) {
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
if (signr <= 0)
break;
/* Restart a system call if necessary. */
if (in_syscall) {
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn = do_no_restart_syscall;
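/* fallthrough */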
case -ERESTARTNOHAND:
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka.sa.sa_flags & SA_RESTART)) {
DBG(1,"ERESTARTSYS: putting -EINTR\n");
regs->gr[28] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* A syscall is just a branch, so all
we have to do is fiddle the return pointer. */
regs->gr[31] -= 8; /* delayed branching */
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
break;
}
}
/* Whee! Actually deliver the signal. If the
delivery failed, we need to continue to iterate in
this loop so we can deliver the SIGSEGV... */
if (handle_signal(signr, &info, &ka, oldset, regs, in_syscall)) {
DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
regs->gr[28]);
return 1;
}
}
/* end of while(1) looping forever if we can't force a signal */
/* Did we come from a system call? */
if (in_syscall) {
/* Restart the system call - no handlers present */
if (regs->gr[28] == -ERESTART_RESTARTBLOCK) {
unsigned int *usp = (unsigned int *)regs->gr[30];
/* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*
* 0: <return address (orig r31)>
* 4: <2nd half for 64-bit>
* 8: ldw 0(%sp), %r31
* 12: be 0x100(%sr2, %r0)
* 16: ldi __NR_restart_syscall, %r20
*/
#ifndef __LP64__
put_user(regs->gr[31], &usp[0]);
put_user(0x0fc0109f, &usp[2]);
#else
put_user(regs->gr[31] >> 32, &usp[0]);
put_user(regs->gr[31] & 0xffffffff, &usp[1]);
put_user(0x0fc010df, &usp[2]);
#endif
put_user(0xe0008200, &usp[3]);
put_user(0x34140000, &usp[4]);
/* Stack is 64-byte aligned, and we only
* need to flush 1 cache line */
asm("fdc 0(%%sr3, %0)\n"
"fic 0(%%sr3, %0)\n"
"sync\n"
: : "r"(regs->gr[30]));
regs->gr[31] = regs->gr[30] + 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
} else if (regs->gr[28] == -ERESTARTNOHAND ||
regs->gr[28] == -ERESTARTSYS ||
regs->gr[28] == -ERESTARTNOINTR) {
/* Hooray for delayed branching. We don't
have to restore %r20 (the system call
number) because it gets loaded in the delay
slot of the branch external instruction. */
regs->gr[31] -= 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
}
}
DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
regs->gr[28]);
return 0;
}

View File

@@ -0,0 +1,400 @@
/* Signal support for 32-bit kernel builds
*
* Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
* Code was mostly borrowed from kernel/signal.c.
* See kernel/signal.c for additional Copyrights.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/compat_signal.h>
#include <asm/uaccess.h>
#include "signal32.h"
#include "sys32.h"
#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2
#if DEBUG_COMPAT_SIG
#define DBG(LEVEL, ...) \
((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \
? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
inline void
sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
{
s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
}
inline void
sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
{
s32->sig[0] = s64->sig[0] & 0xffffffffUL;
s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
}
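/* For example, a 64-bit mask of 0x0000000400000002 (SIGINT plus one
* high signal) splits into sig[0] == 0x00000002 and sig[1] ==
* 0x00000004; sigset_32to64() reassembles the identical value. */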
static int
put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
{
compat_sigset_t s;
if (sz != sizeof *set) panic("put_sigset32()");
sigset_64to32(&s, set);
return copy_to_user(up, &s, sizeof s);
}
static int
get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
{
compat_sigset_t s;
int r;
if (sz != sizeof *set) panic("get_sigset32()");
if ((r = copy_from_user(&s, up, sz)) == 0) {
sigset_32to64(set, &s);
}
return r;
}
int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
unsigned int sigsetsize)
{
sigset_t old_set, new_set;
int ret;
if (set && get_sigset32(set, &new_set, sigsetsize))
return -EFAULT;
KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL,
oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);
if (!ret && oset && put_sigset32(oset, &old_set, sigsetsize))
return -EFAULT;
return ret;
}
int sys32_rt_sigpending(compat_sigset_t __user *uset, unsigned int sigsetsize)
{
int ret;
sigset_t set;
KERNEL_SYSCALL(ret, sys_rt_sigpending, (sigset_t __user *)&set, sigsetsize);
if (!ret && put_sigset32(uset, &set, sigsetsize))
return -EFAULT;
return ret;
}
long
sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact,
size_t sigsetsize)
{
struct k_sigaction32 new_sa32, old_sa32;
struct k_sigaction new_sa, old_sa;
int ret = -EINVAL;
if (act) {
if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
return -EFAULT;
new_sa.sa.sa_handler = (__sighandler_t)(unsigned long)new_sa32.sa.sa_handler;
new_sa.sa.sa_flags = new_sa32.sa.sa_flags;
sigset_32to64(&new_sa.sa.sa_mask, &new_sa32.sa.sa_mask);
}
ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
if (!ret && oact) {
sigset_64to32(&old_sa32.sa.sa_mask, &old_sa.sa.sa_mask);
old_sa32.sa.sa_flags = old_sa.sa.sa_flags;
old_sa32.sa.sa_handler = (__sighandler_t32)(unsigned long)old_sa.sa.sa_handler;
if (copy_to_user(oact, &old_sa32.sa, sizeof old_sa32.sa))
return -EFAULT;
}
return ret;
}
int
do_sigaltstack32 (const compat_stack_t __user *uss32, compat_stack_t __user *uoss32, unsigned long sp)
{
compat_stack_t ss32, oss32;
stack_t ss, oss;
stack_t *ssp = NULL, *ossp = NULL;
int ret;
if (uss32) {
if (copy_from_user(&ss32, uss32, sizeof ss32))
return -EFAULT;
ss.ss_sp = (void __user *)(unsigned long)ss32.ss_sp;
ss.ss_flags = ss32.ss_flags;
ss.ss_size = ss32.ss_size;
ssp = &ss;
}
if (uoss32)
ossp = &oss;
KERNEL_SYSCALL(ret, do_sigaltstack, (const stack_t __user *)ssp, (stack_t __user *)ossp, sp);
if (!ret && uoss32) {
oss32.ss_sp = (unsigned int)(unsigned long)oss.ss_sp;
oss32.ss_flags = oss.ss_flags;
oss32.ss_size = oss.ss_size;
if (copy_to_user(uoss32, &oss32, sizeof *uoss32))
return -EFAULT;
}
return ret;
}
long
restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
struct pt_regs *regs)
{
long err = 0;
compat_uint_t compat_reg;
compat_uint_t compat_regt;
int regn;
/* When loading 32-bit values into 64-bit registers make
sure to clear the upper 32-bits */
DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs);
DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc));
for(regn=0; regn < 32; regn++){
err |= __get_user(compat_reg,&sc->sc_gr[regn]);
regs->gr[regn] = compat_reg;
/* Load upper half */
err |= __get_user(compat_regt,&rf->rf_gr[regn]);
regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n",
regn, regs->gr[regn], compat_regt, compat_reg);
}
DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr));
/* XXX: BE WARNED FR's are 64-BIT! */
err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
/* Better safe than sorry, pass __get_user two things of
the same size and let gcc do the upward conversion to
64-bits */
err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
/* Load upper half */
err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n",
&sc->sc_iaoq[0], compat_reg);
err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
/* Load upper half */
err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n",
&sc->sc_iaoq[1],compat_reg);
DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n",
regs->iaoq[0],regs->iaoq[1]);
err |= __get_user(compat_reg, &sc->sc_iasq[0]);
/* Load the upper half for iasq */
err |= __get_user(compat_regt, &rf->rf_iasq[0]);
regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);
err |= __get_user(compat_reg, &sc->sc_iasq[1]);
/* Load the upper half for iasq */
err |= __get_user(compat_regt, &rf->rf_iasq[1]);
regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n",
regs->iasq[0],regs->iasq[1]);
err |= __get_user(compat_reg, &sc->sc_sar);
/* Load the upper half for sar */
err |= __get_user(compat_regt, &rf->rf_sar);
regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);
DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);
return err;
}
/*
* Set up the sigcontext structure for this process.
* This is not an easy task if the kernel is 64-bit, it will require
* that we examine the process personality to determine if we need to
* truncate for a 32-bit userspace.
*/
long
setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
struct pt_regs *regs, int in_syscall)
{
compat_int_t flags = 0;
long err = 0;
compat_uint_t compat_reg;
compat_uint_t compat_regb;
int regn;
if (on_sig_stack((unsigned long) sc))
flags |= PARISC_SC_FLAG_ONSTACK;
if (in_syscall) {
DBG(1,"setup_sigcontext32: in_syscall\n");
flags |= PARISC_SC_FLAG_IN_SYSCALL;
/* Truncate gr31 */
compat_reg = (compat_uint_t)(regs->gr[31]);
/* regs->iaoq is undefined in the syscall return path */
err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
&sc->sc_iaoq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->gr[31]+4);
err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
&sc->sc_iaoq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
/* Truncate sr3 */
compat_reg = (compat_uint_t)(regs->sr[3]);
err |= __put_user(compat_reg, &sc->sc_iasq[0]);
err |= __put_user(compat_reg, &sc->sc_iasq[1]);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->sr[3] >> 32);
err |= __put_user(compat_reg, &rf->rf_iasq[0]);
err |= __put_user(compat_reg, &rf->rf_iasq[1]);
DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
regs->gr[31], regs->gr[31]+4);
} else {
compat_reg = (compat_uint_t)(regs->iaoq[0]);
err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
&sc->sc_iaoq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->iaoq[1]);
err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
&sc->sc_iaoq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->iasq[0]);
err |= __put_user(compat_reg, &sc->sc_iasq[0]);
DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n",
&sc->sc_iasq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iasq[0] >> 32);
err |= __put_user(compat_reg, &rf->rf_iasq[0]);
DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->iasq[1]);
err |= __put_user(compat_reg, &sc->sc_iasq[1]);
DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n",
&sc->sc_iasq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iasq[1] >> 32);
err |= __put_user(compat_reg, &rf->rf_iasq[1]);
DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
/* Print out the IAOQ for debugging */
DBG(1,"setup_sigcontext32: ia0q %#lx / %#lx\n",
regs->iaoq[0], regs->iaoq[1]);
}
err |= __put_user(flags, &sc->sc_flags);
DBG(1,"setup_sigcontext32: Truncating general registers.\n");
for(regn=0; regn < 32; regn++){
/* Truncate a general register */
compat_reg = (compat_uint_t)(regs->gr[regn]);
err |= __put_user(compat_reg, &sc->sc_gr[regn]);
/* Store upper half */
compat_regb = (compat_uint_t)(regs->gr[regn] >> 32);
err |= __put_user(compat_regb, &rf->rf_gr[regn]);
/* DEBUG: Write out the "upper / lower" register data */
DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn,
compat_regb, compat_reg);
}
/* Copy the floating point registers (same size)
XXX: BE WARNED FR's are 64-BIT! */
DBG(1,"setup_sigcontext32: Copying from regs to sc, "
"sc->sc_fr size = %#lx, regs->fr size = %#lx\n",
sizeof(regs->fr), sizeof(sc->sc_fr));
err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
compat_reg = (compat_uint_t)(regs->sar);
err |= __put_user(compat_reg, &sc->sc_sar);
DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->sar >> 32);
err |= __put_user(compat_reg, &rf->rf_sar);
DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg);
DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]);
return err;
}

View File

@@ -0,0 +1,43 @@
/*
* Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2003 Carlos O'Donell <carlos at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PARISC64_KERNEL_SIGNAL32_H
#define _PARISC64_KERNEL_SIGNAL32_H
#include <linux/compat.h>
#include <asm/compat_signal.h>
#include <asm/compat_rt_sigframe.h>
/* ELF32 signal handling */
struct k_sigaction32 {
struct compat_sigaction sa;
};
void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
int do_sigaltstack32 (const compat_stack_t __user *uss32,
compat_stack_t __user *uoss32, unsigned long sp);
long restore_sigcontext32(struct compat_sigcontext __user *sc,
struct compat_regfile __user *rf,
struct pt_regs *regs);
long setup_sigcontext32(struct compat_sigcontext __user *sc,
struct compat_regfile __user *rf,
struct pt_regs *regs, int in_syscall);
#endif

723
arch/parisc/kernel/smp.c Normal file
View File

@@ -0,0 +1,723 @@
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */
#include <linux/autoconf.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */
#include <asm/io.h>
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#define kDEBUG 0
DEFINE_SPINLOCK(smp_lock);
volatile struct task_struct *smp_init_current_idle_task;
static volatile int cpu_now_booting = 0; /* track which CPU is booting */
static int parisc_max_cpus = 1;
/* online cpus are ones that we've managed to bring up completely
* possible cpus are all valid cpus
* present cpus are all detected cpus
*
* On startup we bring up the "possible" cpus. Since we discover
* CPUs later, we add them as hotplug, so the present cpu mask is
* empty in the beginning.
*/
cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of possible CPUs */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
struct smp_call_struct {
void (*func) (void *info);
void *info;
long wait;
atomic_t unstarted_count;
atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;
enum ipi_message_type {
IPI_NOP=0,
IPI_RESCHEDULE=1,
IPI_CALL_FUNC,
IPI_CPU_START,
IPI_CPU_STOP,
IPI_CPU_TEST
};
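/* Each value above is a bit index into cpuinfo_parisc->pending_ipi:
* ipi_send() sets the bit and ipi_interrupt() clears and services it. */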
/********** SMP inter processor interrupt and communication routines */
#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
** *May* need this "hook" to register IPI handler
** once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
if(cpu_online(cpuid) )
{
switch_to_idle_task(current);
}
return;
}
#endif
/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
cpu_data[this_cpu].state = STATE_STOPPED;
mark_bh(IPI_BH);
#else
/* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */
cpu_clear(smp_processor_id(), cpu_online_map);
local_irq_disable();
for (;;)
;
#endif
}
irqreturn_t
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
int this_cpu = smp_processor_id();
struct cpuinfo_parisc *p = &cpu_data[this_cpu];
unsigned long ops;
unsigned long flags;
/* Count this now; we may make a call that never returns. */
p->ipi_count++;
mb(); /* Order interrupt and bit testing. */
for (;;) {
spin_lock_irqsave(&(p->lock),flags);
ops = p->pending_ipi;
p->pending_ipi = 0;
spin_unlock_irqrestore(&(p->lock),flags);
mb(); /* Order bit clearing and data access. */
if (!ops)
break;
while (ops) {
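/* ffz(~ops) is the index of the lowest set bit in ops,
* i.e. the next pending IPI type to service. */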
unsigned long which = ffz(~ops);
switch (which) {
case IPI_RESCHEDULE:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_RESCHEDULE);
/*
* Reschedule callback. Everything to be
* done is done by the interrupt return path.
*/
break;
case IPI_CALL_FUNC:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CALL_FUNC);
{
volatile struct smp_call_struct *data;
void (*func)(void *info);
void *info;
int wait;
data = smp_call_function_data;
func = data->func;
info = data->info;
wait = data->wait;
mb();
atomic_dec ((atomic_t *)&data->unstarted_count);
/* At this point, *data can't
* be relied upon.
*/
(*func)(info);
/* Notify the sending CPU that the
* task is done.
*/
mb();
if (wait)
atomic_dec ((atomic_t *)&data->unfinished_count);
}
break;
case IPI_CPU_START:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
p->state = STATE_RUNNING;
#endif
break;
case IPI_CPU_STOP:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
halt_processor();
#endif
break;
case IPI_CPU_TEST:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_TEST);
break;
default:
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which);
ops &= ~(1 << which);
return IRQ_NONE;
} /* Switch */
} /* while (ops) */
}
return IRQ_HANDLED;
}
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
struct cpuinfo_parisc *p = &cpu_data[cpu];
unsigned long flags;
spin_lock_irqsave(&(p->lock),flags);
p->pending_ipi |= 1 << op;
gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
spin_unlock_irqrestore(&(p->lock),flags);
}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
if (dest_cpu == NO_PROC_ID) {
BUG();
return;
}
ipi_send(dest_cpu, op);
}
static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
int i;
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i) && i != smp_processor_id())
send_IPI_single(i, op);
}
}
inline void
smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
static inline void
smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
/**
* Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <retry> If true, keep retrying until ready.
* <wait> If true, wait until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code.
*
* Does not return until remote CPUs are nearly ready to execute <func>
* or have executed.
*/
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
struct smp_call_struct data;
unsigned long timeout;
static DEFINE_SPINLOCK(lock);
int retries = 0;
if (num_online_cpus() < 2)
return 0;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
data.func = func;
data.info = info;
data.wait = wait;
atomic_set(&data.unstarted_count, num_online_cpus() - 1);
atomic_set(&data.unfinished_count, num_online_cpus() - 1);
if (retry) {
spin_lock (&lock);
while (smp_call_function_data != 0)
barrier();
}
else {
spin_lock (&lock);
if (smp_call_function_data) {
spin_unlock (&lock);
return -EBUSY;
}
}
smp_call_function_data = &data;
spin_unlock (&lock);
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(IPI_CALL_FUNC);
retry:
/* Wait for response */
timeout = jiffies + HZ;
while ( (atomic_read (&data.unstarted_count) > 0) &&
time_before (jiffies, timeout) )
barrier ();
if (atomic_read (&data.unstarted_count) > 0) {
printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
smp_processor_id(), ++retries);
goto retry;
}
/* We either got one or timed out. Release the lock */
mb();
smp_call_function_data = NULL;
while (wait && atomic_read (&data.unfinished_count) > 0)
barrier ();
return 0;
}
EXPORT_SYMBOL(smp_call_function);
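/* Typical usage, with flush_fn standing in for any fast, non-blocking
* handler: run it on all other CPUs and wait, then run it locally:
*
*	smp_call_function(flush_fn, NULL, 0, 1);
*	flush_fn(NULL);
*
* on_each_cpu() wraps essentially this pattern and is what
* smp_flush_tlb_all() below uses.
*/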
/*
* Flush all other CPUs' TLBs and then mine. Do this with on_each_cpu()
* as we want to ensure all TLBs are flushed before proceeding.
*/
extern void flush_tlb_all_local(void);
void
smp_flush_tlb_all(void)
{
on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}
void
smp_do_timer(struct pt_regs *regs)
{
int cpu = smp_processor_id();
struct cpuinfo_parisc *data = &cpu_data[cpu];
if (!--data->prof_counter) {
data->prof_counter = data->prof_multiplier;
update_process_times(user_mode(regs));
}
}
/*
* Called by secondaries to update state and initialize CPU registers.
*/
static void __init
smp_cpu_init(int cpunum)
{
extern int init_per_cpu(int); /* arch/parisc/kernel/setup.c */
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
/* Set modes and Enable floating point coprocessor */
(void) init_per_cpu(cpunum);
disable_sr_hashing();
mb();
/* Well, support 2.4 linux scheme as well. */
if (cpu_test_and_set(cpunum, cpu_online_map))
{
extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
}
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
if(current->mm)
BUG();
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQ's are enabled or pending */
}
/*
* Slaves start using C here. Indirectly called from smp_slave_stext.
* Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/
void __init smp_callin(void)
{
int slave_id = cpu_now_booting;
#if 0
void *istack;
#endif
smp_cpu_init(slave_id);
#if 0 /* NOT WORKING YET - see entry.S */
istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
if (istack == NULL) {
printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n",slave_id);
BUG();
}
mtctl(istack,31);
#endif
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local();
local_irq_enable(); /* Interrupts have been off until now */
cpu_idle(); /* Wait for timer to schedule some work */
/* NOTREACHED */
panic("smp_callin() AAAAaaaaahhhh....\n");
}
/*
* Bring one cpu online.
*/
int __init smp_boot_one_cpu(int cpuid)
{
struct task_struct *idle;
long timeout;
/*
* Create an idle task for this CPU. Note the address we'd give
* to kernel_thread is irrelevant -- it's going to start
* where OS_BOOT_RENDEVZ vector in SAL says to start. But
* this gets all the other task-y sort of data structures set
* up like we wish. We need to pull the just created idle task
* off the run queue and stuff it into the init_tasks[] array.
* Sheesh . . .
*/
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
idle->thread_info->cpu = cpuid;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
cpu_now_booting = cpuid;
/*
** boot strap code needs to know the task address since
** it also contains the process stack.
*/
smp_init_current_idle_task = idle;
mb();
printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
/*
** This gets PDC to release the CPU from a very tight loop.
**
** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
** is executed after receiving the rendezvous signal (an interrupt to
** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
** contents of memory are valid."
*/
gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
mb();
/*
* OK, wait a bit for that CPU to finish staggering about.
* Slave will set a bit when it reaches smp_cpu_init().
* Once the "monarch CPU" sees the bit change, it can move on.
*/
for (timeout = 0; timeout < 10000; timeout++) {
if(cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
smp_init_current_idle_task = NULL;
goto alive;
}
udelay(100);
barrier();
}
put_task_struct(idle);
idle = NULL;
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
alive:
/* Remember the Slave data */
#if (kDEBUG>=100)
printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
cpuid, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
cpu_data[cpuid].state = STATE_RUNNING;
#endif
return 0;
}
void __devinit smp_prepare_boot_cpu(void)
{
int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
#ifdef ENTRY_SYS_CPUS
cpu_data[0].state = STATE_RUNNING;
#endif
/* Setup BSP mappings */
printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
cpu_set(bootstrap_processor, cpu_online_map);
cpu_set(bootstrap_processor, cpu_present_map);
}
/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
cpus_clear(cpu_present_map);
cpu_set(0, cpu_present_map);
parisc_max_cpus = max_cpus;
if (!max_cpus)
printk(KERN_INFO "SMP mode deactivated.\n");
}
void smp_cpus_done(unsigned int cpu_max)
{
return;
}
int __devinit __cpu_up(unsigned int cpu)
{
if (cpu != 0 && cpu < parisc_max_cpus)
smp_boot_one_cpu(cpu);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
** entry.s: ENTRY_NAME(sys_cpus) / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
int i,j=0;
extern int current_pid(int cpu);
if( argc > 2 ) {
printk("sys_cpus:Only one argument supported\n");
return (-1);
}
if ( argc == 1 ){
#ifdef DUMP_MORE_STATE
for(i=0; i<NR_CPUS; i++) {
int cpus_per_line = 4;
if(cpu_online(i)) {
if (j++ % cpus_per_line)
printk(" %3d",i);
else
printk("\n %3d",i);
}
}
printk("\n");
#else
printk("\n 0\n");
#endif
} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
for(i=0;i<NR_CPUS;i++) {
if (!cpu_online(i))
continue;
if (cpu_data[i].cpuid != NO_PROC_ID) {
switch(cpu_data[i].state) {
case STATE_RENDEZVOUS:
printk("RENDEZVS ");
break;
case STATE_RUNNING:
printk((current_pid(i)!=0) ? "RUNNING " : "IDLING ");
break;
case STATE_STOPPED:
printk("STOPPED ");
break;
case STATE_HALTED:
printk("HALTED ");
break;
default:
printk("%08x?", cpu_data[i].state);
break;
}
if(cpu_online(i)) {
printk(" %4d",current_pid(i));
}
printk(" %6d",cpu_number_map(i));
printk(" %5d",i);
printk(" 0x%lx\n",cpu_data[i].hpa);
}
}
#else
printk("\n%s %4d 0 0 --------",
(current->pid)?"RUNNING ": "IDLING ",current->pid);
#endif
} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
#ifdef DUMP_MORE_STATE
printk("\nCPUSTATE CPUID\n");
for (i=0;i<NR_CPUS;i++) {
if (!cpu_online(i))
continue;
if (cpu_data[i].cpuid != NO_PROC_ID) {
switch(cpu_data[i].state) {
case STATE_RENDEZVOUS:
printk("RENDEZVS");break;
case STATE_RUNNING:
printk((current_pid(i)!=0) ? "RUNNING " : "IDLING");
break;
case STATE_STOPPED:
printk("STOPPED ");break;
case STATE_HALTED:
printk("HALTED ");break;
default:
break;
}
printk(" %5d\n",i);
}
}
#else
printk("\n%s CPU0",(current->pid==0)?"RUNNING ":"IDLING ");
#endif
} else {
printk("sys_cpus:Unknown request\n");
return (-1);
}
return 0;
}
#endif /* ENTRY_SYS_CPUS */
#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
#endif

View File

@@ -0,0 +1,48 @@
/*
* Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PARISC64_KERNEL_SYS32_H
#define _PARISC64_KERNEL_SYS32_H
#include <linux/compat.h>
/* Call a kernel syscall which will use kernel space instead of user
* space for its copy_to/from_user.
*/
#define KERNEL_SYSCALL(ret, syscall, args...) \
{ \
mm_segment_t old_fs = get_fs(); \
set_fs(KERNEL_DS); \
ret = syscall(args); \
set_fs (old_fs); \
}
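/* Hedged usage sketch -- this mirrors the real call site in
 * sys_parisc32.c (sys32_sched_rr_get_interval); 'ret' and 't' are the
 * caller's locals:
 *
 *	int ret;
 *	struct timespec t;
 *	KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid,
 *		       (struct timespec __user *)&t);
 *
 * The temporary set_fs(KERNEL_DS) window lets the nested syscall's
 * copy_to/from_user accept the kernel pointer &t.
 */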
#ifdef CONFIG_COMPAT
typedef __u32 __sighandler_t32;
struct sigaction32 {
__sighandler_t32 sa_handler;
unsigned int sa_flags;
compat_sigset_t sa_mask; /* mask last for extensibility */
};
#endif
#endif

View File

@@ -0,0 +1,253 @@
/*
* PARISC specific syscalls
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
int sys_pipe(int __user *fildes)
{
int fd[2];
int error;
error = do_pipe(fd);
if (!error) {
if (copy_to_user(fildes, fd, 2*sizeof(int)))
error = -EFAULT;
}
return error;
}
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
{
struct vm_area_struct *vma;
addr = PAGE_ALIGN(addr);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vma || addr + len <= vma->vm_start)
return addr;
addr = vma->vm_end;
}
}
#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
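/* Illustrative arithmetic (hypothetical SHMLBA of 0x1000 for
 * readability): DCACHE_ALIGN(0x1234) == (0x1234 + 0xfff) & ~0xfff
 * == 0x2000, i.e. round up to the next SHMLBA boundary.
 */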
/*
* We need to know the offset to use. Old scheme was to look for
* existing mapping and use the same offset. New scheme is to use the
* address of the kernel data structure as the seed for the offset.
* We'll see how that works...
*
* The mapping is cacheline aligned, so there's no information in the bottom
* few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
* drop the bottom 8 bits and use bits 8-17.
*/
static int get_offset(struct address_space *mapping)
{
int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
return offset & 0x3FF000;
}
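/* Worked example (illustrative pointer value; with 4k pages the
 * shift is PAGE_SHIFT - 8 == 4): mapping == 0x12345678 gives
 *
 *	0x12345678 << 4        == 0x123456780
 *	0x123456780 & 0x3FF000 == 0x56000
 *
 * i.e. bits 8-17 of the pointer select one of 1024 page-aligned
 * slots within the 4MB period.
 */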
static unsigned long get_shared_area(struct address_space *mapping,
unsigned long addr, unsigned long len, unsigned long pgoff)
{
struct vm_area_struct *vma;
int offset = mapping ? get_offset(mapping) : 0;
addr = DCACHE_ALIGN(addr - offset) + offset;
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vma || addr + len <= vma->vm_start)
return addr;
addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
if (addr < vma->vm_end) /* handle wraparound */
return -ENOMEM;
}
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
if (len > TASK_SIZE)
return -ENOMEM;
if (!addr)
addr = TASK_UNMAPPED_BASE;
if (filp) {
addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
} else if(flags & MAP_SHARED) {
addr = get_shared_area(NULL, addr, len, pgoff);
} else {
addr = get_unshared_area(addr, len);
}
return addr;
}
static unsigned long do_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
struct file * file = NULL;
unsigned long error = -EBADF;
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
goto out;
}
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
down_write(&current->mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&current->mm->mmap_sem);
if (file != NULL)
fput(file);
out:
return error;
}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
}
long sys_shmat_wrapper(int shmid, char __user *shmaddr, int shmflag)
{
unsigned long raddr;
int r;
r = do_shmat(shmid, shmaddr, shmflag, &raddr);
if (r < 0)
return r;
return raddr;
}
/* Fucking broken ABI */
#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
unsigned int high, unsigned int low)
{
return sys_truncate(path, (long)high << 32 | low);
}
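/* e.g. (hypothetical values) high == 0x1, low == 0x80000000 glues
 * back together as (long)0x1 << 32 | 0x80000000 == 0x180000000
 * (6GB) -- the 64-bit length a narrow caller passed in two 32-bit
 * registers.
 */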
asmlinkage long parisc_ftruncate64(unsigned int fd,
unsigned int high, unsigned int low)
{
return sys_ftruncate(fd, (long)high << 32 | low);
}
/* stubs for the benefit of the syscall_table since truncate64 and truncate
* are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return sys_fcntl(fd, cmd, arg);
}
#else
asmlinkage long parisc_truncate64(const char __user * path,
unsigned int high, unsigned int low)
{
return sys_truncate64(path, (loff_t)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
unsigned int high, unsigned int low)
{
return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif
asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
unsigned int high, unsigned int low)
{
return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
size_t count, unsigned int high, unsigned int low)
{
return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
size_t count)
{
return sys_readahead(fd, (loff_t)high << 32 | low, count);
}
asmlinkage long parisc_fadvise64_64(int fd,
unsigned int high_off, unsigned int low_off,
unsigned int high_len, unsigned int low_len, int advice)
{
return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
(loff_t)high_len << 32 | low_len, advice);
}
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
{
return -ENOMEM;
}
asmlinkage int sys_free_hugepages(unsigned long addr)
{
return -EINVAL;
}

View File

@@ -0,0 +1,720 @@
/*
* sys_parisc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 2000-2001 Hewlett Packard Company
* Copyright (C) 2000 John Marvin
* Copyright (C) 2001 Matthew Wilcox
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment. Based heavily on sys_ia32.c and sys_sparc32.c.
*/
#include <linux/config.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/nfs_fs.h>
#include <linux/ncp_fs.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/binfmts.h>
#include <linux/namei.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/mmu_context.h>
#include "sys32.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(x) printk x
#else
#define DBG(x)
#endif
/*
* sys32_execve() executes a new program.
*/
asmlinkage int sys32_execve(struct pt_regs *regs)
{
int error;
char *filename;
DBG(("sys32_execve(%p) r26 = 0x%lx\n", regs, regs->gr[26]));
filename = getname((const char __user *) regs->gr[26]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = compat_do_execve(filename, compat_ptr(regs->gr[25]),
compat_ptr(regs->gr[24]), regs);
if (error == 0) {
task_lock(current);
current->ptrace &= ~PT_DTRACE;
task_unlock(current);
}
putname(filename);
out:
return error;
}
asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
int r22, int r21, int r20)
{
printk(KERN_ERR "%s(%d): Unimplemented 32 on 64 syscall #%d!\n",
current->comm, current->pid, r20);
return -ENOSYS;
}
#ifdef CONFIG_SYSCTL
struct __sysctl_args32 {
u32 name;
int nlen;
u32 oldval;
u32 oldlenp;
u32 newval;
u32 newlen;
u32 __unused[4];
};
asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
{
struct __sysctl_args32 tmp;
int error;
unsigned int oldlen32;
size_t oldlen, *oldlenp = NULL;
unsigned long addr = (((long __force)&args->__unused[0]) + 7) & ~7;
extern int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
void *newval, size_t newlen);
DBG(("sysctl32(%p)\n", args));
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
if (tmp.oldval && tmp.oldlenp) {
/* Duh, this is ugly and might not work if sysctl_args
is in read-only memory, but do_sysctl indirectly does
a lot of uaccess in both directions and we'd have to
basically copy the whole sysctl.c here, and
glibc's __sysctl uses rw memory for the structure
anyway. */
/* a possibly better hack than this, which will avoid the
* problem if the struct is read only, is to push the
* 'oldlen' value out to the user's stack instead. -PB
*/
if (get_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
return -EFAULT;
oldlen = oldlen32;
if (put_user(oldlen, (size_t *)addr))
return -EFAULT;
oldlenp = (size_t *)addr;
}
lock_kernel();
error = do_sysctl((int *)(u64)tmp.name, tmp.nlen, (void *)(u64)tmp.oldval,
oldlenp, (void *)(u64)tmp.newval, tmp.newlen);
unlock_kernel();
if (oldlenp) {
if (!error) {
if (get_user(oldlen, (size_t *)addr)) {
error = -EFAULT;
} else {
oldlen32 = oldlen;
if (put_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
error = -EFAULT;
}
}
if (copy_to_user(&args->__unused[0], tmp.__unused, sizeof(tmp.__unused)))
error = -EFAULT;
}
return error;
}
#endif /* CONFIG_SYSCTL */
asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
struct compat_timespec __user *interval)
{
struct timespec t;
int ret;
KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t);
if (put_compat_timespec(&t, interval))
return -EFAULT;
return ret;
}
static int
put_compat_timeval(struct compat_timeval __user *u, struct timeval *t)
{
struct compat_timeval t32;
t32.tv_sec = t->tv_sec;
t32.tv_usec = t->tv_usec;
return copy_to_user(u, &t32, sizeof t32);
}
static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
{
long usec;
if (__get_user(o->tv_sec, &i->tv_sec))
return -EFAULT;
if (__get_user(usec, &i->tv_usec))
return -EFAULT;
o->tv_nsec = usec * 1000;
return 0;
}
asmlinkage int
sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
{
extern void do_gettimeofday(struct timeval *tv);
if (tv) {
struct timeval ktv;
do_gettimeofday(&ktv);
if (put_compat_timeval(tv, &ktv))
return -EFAULT;
}
if (tz) {
extern struct timezone sys_tz;
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}
return 0;
}
asmlinkage
int sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
{
struct timespec kts;
struct timezone ktz;
if (tv) {
if (get_ts32(&kts, tv))
return -EFAULT;
}
if (tz) {
if (copy_from_user(&ktz, tz, sizeof(ktz)))
return -EFAULT;
}
return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
int err;
if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
!new_valid_dev(stat->rdev))
return -EOVERFLOW;
err = put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
err |= put_user(stat->ino, &statbuf->st_ino);
err |= put_user(stat->mode, &statbuf->st_mode);
err |= put_user(stat->nlink, &statbuf->st_nlink);
err |= put_user(0, &statbuf->st_reserved1);
err |= put_user(0, &statbuf->st_reserved2);
err |= put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
err |= put_user(stat->size, &statbuf->st_size);
err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
err |= put_user(stat->blksize, &statbuf->st_blksize);
err |= put_user(stat->blocks, &statbuf->st_blocks);
err |= put_user(0, &statbuf->__unused1);
err |= put_user(0, &statbuf->__unused2);
err |= put_user(0, &statbuf->__unused3);
err |= put_user(0, &statbuf->__unused4);
err |= put_user(0, &statbuf->__unused5);
err |= put_user(0, &statbuf->st_fstype); /* not avail */
err |= put_user(0, &statbuf->st_realdev); /* not avail */
err |= put_user(0, &statbuf->st_basemode); /* not avail */
err |= put_user(0, &statbuf->st_spareshort);
err |= put_user(stat->uid, &statbuf->st_uid);
err |= put_user(stat->gid, &statbuf->st_gid);
err |= put_user(0, &statbuf->st_spare4[0]);
err |= put_user(0, &statbuf->st_spare4[1]);
err |= put_user(0, &statbuf->st_spare4[2]);
return err;
}
struct linux32_dirent {
u32 d_ino;
compat_off_t d_off;
u16 d_reclen;
char d_name[1];
};
struct old_linux32_dirent {
u32 d_ino;
u32 d_offset;
u16 d_namlen;
char d_name[1];
};
struct getdents32_callback {
struct linux32_dirent __user * current_dir;
struct linux32_dirent __user * previous;
int count;
int error;
};
struct readdir32_callback {
struct old_linux32_dirent __user * dirent;
int count;
};
#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
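/* Worked example (assuming a 4-byte compat_off_t): for a 3-byte
 * name, NAME_OFFSET(de) == offsetof(struct linux32_dirent, d_name)
 * == 10, so reclen == ROUND_UP(10 + 3 + 1, 4) == 16.
 */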
static int
filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
unsigned int d_type)
{
struct linux32_dirent __user * dirent;
struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4);
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
dirent = buf->previous;
if (dirent)
put_user(offset, &dirent->d_off);
dirent = buf->current_dir;
buf->previous = dirent;
put_user(ino, &dirent->d_ino);
put_user(reclen, &dirent->d_reclen);
copy_to_user(dirent->d_name, name, namlen);
put_user(0, dirent->d_name + namlen);
dirent = ((void __user *)dirent) + reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
}
asmlinkage long
sys32_getdents (unsigned int fd, void __user * dirent, unsigned int count)
{
struct file * file;
struct linux32_dirent __user * lastdirent;
struct getdents32_callback buf;
int error;
error = -EBADF;
file = fget(fd);
if (!file)
goto out;
buf.current_dir = (struct linux32_dirent __user *) dirent;
buf.previous = NULL;
buf.count = count;
buf.error = 0;
error = vfs_readdir(file, filldir32, &buf);
if (error < 0)
goto out_putf;
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
put_user(file->f_pos, &lastdirent->d_off);
error = count - buf.count;
}
out_putf:
fput(file);
out:
return error;
}
static int
fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
unsigned int d_type)
{
struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
struct old_linux32_dirent __user * dirent;
if (buf->count)
return -EINVAL;
buf->count++;
dirent = buf->dirent;
put_user(ino, &dirent->d_ino);
put_user(offset, &dirent->d_offset);
put_user(namlen, &dirent->d_namlen);
copy_to_user(dirent->d_name, name, namlen);
put_user(0, dirent->d_name + namlen);
return 0;
}
asmlinkage long
sys32_readdir (unsigned int fd, void __user * dirent, unsigned int count)
{
int error;
struct file * file;
struct readdir32_callback buf;
error = -EBADF;
file = fget(fd);
if (!file)
goto out;
buf.count = 0;
buf.dirent = dirent;
error = vfs_readdir(file, fillonedir32, &buf);
if (error >= 0)
error = buf.count;
fput(file);
out:
return error;
}
/*** copied from mips64 ***/
/*
* Ooo, nasty. We need to frob 32-bit unsigned longs into
* 64-bit unsigned longs here.
*/
static inline int
get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
{
n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
if (ufdset) {
unsigned long odd;
if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32)))
return -EFAULT;
odd = n & 1UL;
n &= ~1UL;
while (n) {
unsigned long h, l;
__get_user(l, ufdset);
__get_user(h, ufdset+1);
ufdset += 2;
*fdset++ = h << 32 | l;
n -= 2;
}
if (odd)
__get_user(*fdset, ufdset);
} else {
/* Tricky: we must clear the full unsigned long in the
* kernel fdset at the end; this memset makes sure that
* actually happens.
*/
memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
}
return 0;
}
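/* Example (illustrative): a user pair { l == 0x1, h == 0x2 } packs
 * into the single kernel word h << 32 | l == 0x200000001, one
 * 64-bit kernel long per two 32-bit user words.
 */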
static inline void
set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
{
unsigned long odd;
n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
if (!ufdset)
return;
odd = n & 1UL;
n &= ~1UL;
while (n) {
unsigned long h, l;
l = *fdset++;
h = l >> 32;
__put_user(l, ufdset);
__put_user(h, ufdset+1);
ufdset += 2;
n -= 2;
}
if (odd)
__put_user(*fdset, ufdset);
}
struct msgbuf32 {
int mtype;
char mtext[1];
};
asmlinkage long sys32_msgsnd(int msqid,
struct msgbuf32 __user *umsgp32,
size_t msgsz, int msgflg)
{
struct msgbuf *mb;
struct msgbuf32 mb32;
int err;
if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
return -ENOMEM;
err = get_user(mb32.mtype, &umsgp32->mtype);
mb->mtype = mb32.mtype;
err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz);
if (err)
err = -EFAULT;
else
KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg);
kfree(mb);
return err;
}
asmlinkage long sys32_msgrcv(int msqid,
struct msgbuf32 __user *umsgp32,
size_t msgsz, long msgtyp, int msgflg)
{
struct msgbuf *mb;
struct msgbuf32 mb32;
int err, len;
if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
return -ENOMEM;
KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg);
if (err >= 0) {
len = err;
mb32.mtype = mb->mtype;
err = put_user(mb32.mtype, &umsgp32->mtype);
err |= copy_to_user(&umsgp32->mtext, mb->mtext, len);
if (err)
err = -EFAULT;
else
err = len;
}
kfree(mb);
return err;
}
asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
{
mm_segment_t old_fs = get_fs();
int ret;
off_t of;
if (offset && get_user(of, offset))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
set_fs(old_fs);
if (offset && put_user(of, offset))
return -EFAULT;
return ret;
}
asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
{
mm_segment_t old_fs = get_fs();
int ret;
loff_t lof;
if (offset && get_user(lof, offset))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_sendfile64(out_fd, in_fd, offset ? (loff_t __user *)&lof : NULL, count);
set_fs(old_fs);
if (offset && put_user(lof, offset))
return -EFAULT;
return ret;
}
struct timex32 {
unsigned int modes; /* mode selector */
int offset; /* time offset (usec) */
int freq; /* frequency offset (scaled ppm) */
int maxerror; /* maximum error (usec) */
int esterror; /* estimated error (usec) */
int status; /* clock command/status */
int constant; /* pll time constant */
int precision; /* clock precision (usec) (read only) */
int tolerance; /* clock frequency tolerance (ppm)
* (read only)
*/
struct compat_timeval time; /* (read only) */
int tick; /* (modified) usecs between clock ticks */
int ppsfreq; /* pps frequency (scaled ppm) (ro) */
int jitter; /* pps jitter (us) (ro) */
int shift; /* interval duration (s) (shift) (ro) */
int stabil; /* pps stability (scaled ppm) (ro) */
int jitcnt; /* jitter limit exceeded (ro) */
int calcnt; /* calibration intervals (ro) */
int errcnt; /* calibration errors (ro) */
int stbcnt; /* stability limit exceeded (ro) */
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
};
asmlinkage long sys32_adjtimex(struct timex32 __user *txc_p32)
{
struct timex txc;
struct timex32 t32;
int ret;
extern int do_adjtimex(struct timex *txc);
if(copy_from_user(&t32, txc_p32, sizeof(struct timex32)))
return -EFAULT;
#undef CP
#define CP(x) txc.x = t32.x
CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
CP(status); CP(constant); CP(precision); CP(tolerance);
CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
CP(stbcnt);
ret = do_adjtimex(&txc);
#undef CP
#define CP(x) t32.x = txc.x
CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
CP(status); CP(constant); CP(precision); CP(tolerance);
CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
CP(stbcnt);
return copy_to_user(txc_p32, &t32, sizeof(struct timex32)) ? -EFAULT : ret;
}
struct sysinfo32 {
s32 uptime;
u32 loads[3];
u32 totalram;
u32 freeram;
u32 sharedram;
u32 bufferram;
u32 totalswap;
u32 freeswap;
unsigned short procs;
u32 totalhigh;
u32 freehigh;
u32 mem_unit;
char _f[12];
};
/* We used to call sys_sysinfo and translate the result. But sys_sysinfo
* undoes the good work done elsewhere, and rather than undoing the
* damage, I decided to just duplicate the code from sys_sysinfo here.
*/
asmlinkage int sys32_sysinfo(struct sysinfo32 __user *info)
{
struct sysinfo val;
int err;
unsigned long seq;
/* We don't need a memset here because we copy the
* struct to userspace one element at a time.
*/
do {
seq = read_seqbegin(&xtime_lock);
val.uptime = jiffies / HZ;
val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
val.procs = nr_threads;
} while (read_seqretry(&xtime_lock, seq));
si_meminfo(&val);
si_swapinfo(&val);
err = put_user (val.uptime, &info->uptime);
err |= __put_user (val.loads[0], &info->loads[0]);
err |= __put_user (val.loads[1], &info->loads[1]);
err |= __put_user (val.loads[2], &info->loads[2]);
err |= __put_user (val.totalram, &info->totalram);
err |= __put_user (val.freeram, &info->freeram);
err |= __put_user (val.sharedram, &info->sharedram);
err |= __put_user (val.bufferram, &info->bufferram);
err |= __put_user (val.totalswap, &info->totalswap);
err |= __put_user (val.freeswap, &info->freeswap);
err |= __put_user (val.procs, &info->procs);
err |= __put_user (val.totalhigh, &info->totalhigh);
err |= __put_user (val.freehigh, &info->freehigh);
err |= __put_user (val.mem_unit, &info->mem_unit);
return err ? -EFAULT : 0;
}
/* lseek() needs a wrapper because 'offset' can be negative, but the top
* half of the argument has been zeroed by syscall.S.
*/
asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
{
return sys_lseek(fd, offset, origin);
}
asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
{
union semun u;
if (cmd == SETVAL) {
/* Ugh. arg is a union of int, ptr, ptr, ptr, so it is 8 bytes.
* The int should be in the first 4, but our argument
* frobbing has left it in the last 4.
*/
u.val = *((int *)&arg + 1);
return sys_semctl (semid, semnum, cmd, u);
}
return sys_semctl (semid, semnum, cmd, arg);
}
long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
size_t len)
{
return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
buf, len);
}

View File

@@ -0,0 +1,703 @@
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
* Licensed under the GNU GPL.
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
#include <asm/offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
* userspace application.
*/
#define KILL_INSN break 0,0
#include <linux/config.h> /* for CONFIG_SMP */
#ifdef __LP64__
.level 2.0w
#else
.level 1.1
#endif
#ifndef __LP64__
.macro fixup_branch,lbl
b \lbl
.endm
#else
.macro fixup_branch,lbl
ldil L%\lbl, %r1
ldo R%\lbl(%r1), %r1
bv,n %r0(%r1)
.endm
#endif
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
.export linux_gateway_page
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
* dereferenced, so null pointers will still fault. We start
* the actual entry point at 0x100. We put break instructions
* at the beginning of the page to trap null indirect function
* pointers.
*/
.align 4096
linux_gateway_page:
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
KILL_INSN
.endr
/* ADDRESS 0xb0 to 0xb4, lws uses 1 insn for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)
lws_entry:
/* Unconditional branch to lws_start, located on the
same gateway page */
b,n lws_start
/* Fill from 0xb4 to 0xe0 */
.rept 11
KILL_INSN
.endr
/* This function MUST be located at 0xe0 for glibc's threading
mechanism to work. DO NOT MOVE THIS CODE EVER! */
set_thread_pointer:
gate .+8, %r0 /* increase privilege */
depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
be 0(%sr7,%r31) /* return to user space */
mtctl %r26, %cr27 /* move arg0 to the control register */
/* Increase the chance of trapping if random jumps occur to this
address, fill from 0xf0 to 0x100 */
.rept 4
KILL_INSN
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
.align 256
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
mfsp %sr7,%r1 /* save user sr7 */
mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef __LP64__
/* for now we can *always* set the W bit on entry to the syscall
* since we don't support wide userland processes. We could
* also save the current SM other than in r0 and restore it on
* exit from the syscall, and also use that value to know
* whether to do narrow or wide syscalls. -PB
*/
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
b,n 1f
/* The top halves of argument registers must be cleared on syscall
* entry from narrow executable.
*/
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
depdi 0, 31, 32, %r23
depdi 0, 31, 32, %r22
depdi 0, 31, 32, %r21
1:
#endif
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
xor %r1,%r30,%r30
ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
/* N.B.: It is critical that we don't set sr7 to 0 until r30
* contains a valid kernel stack pointer. It is also
* critical that we don't start using the kernel stack
* until after sr7 has been set to 0.
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
/* Save some registers for sigcontext and potential task
switch (see entry.S for the details of which ones are
saved/restored). TASK_PT_PSW is zeroed so we can see whether
a process is on a syscall or not. For an interrupt the real
PSW value is stored. This is needed for gdb and sys_ptrace. */
STREG %r0, TASK_PT_PSW(%r1)
STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
STREG %r19, TASK_PT_GR19(%r1)
LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
#ifdef __LP64__
extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
#if 0
xor %r19,%r2,%r2 /* clear bottom bit */
depd,z %r19,1,1,%r19
std %r19,TASK_PT_PSW(%r1)
#endif
#endif
STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
STREG %r20, TASK_PT_GR20(%r1)
STREG %r21, TASK_PT_GR21(%r1)
STREG %r22, TASK_PT_GR22(%r1)
STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
STREG %r27, TASK_PT_GR27(%r1) /* user dp */
STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
save_fp %r27 /* or potential task switch */
mfctl %cr11, %r27 /* i.e. SAR */
STREG %r27, TASK_PT_SAR(%r1)
loadgp
#ifdef __LP64__
ldo -16(%r30),%r29 /* Reference param save area */
copy %r19,%r2 /* W bit back to r2 */
#else
/* no need to save these on stack in wide mode because the first 8
* args are passed in registers */
stw %r22, -52(%r30) /* 5th argument */
stw %r21, -56(%r30) /* 6th argument */
#endif
/* Are we being ptraced? */
mfctl %cr30, %r1
LDREG TI_TASK(%r1),%r1
LDREG TASK_PTRACE(%r1), %r1
bb,<,n %r1,31,.Ltracesys
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
#ifdef __LP64__
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
ldil L%sys_call_table, %r1
ldo R%sys_call_table(%r1), %r19
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
* when not in_syscall, then we want to return via syscall_exit_rfi,
* not syscall_exit. Signal no. in r20, in_syscall in r25 (see
* trampoline code in signal.c).
*/
ldi __NR_rt_sigreturn,%r2
comb,= %r2,%r20,.Lrt_sigreturn
.Lin_syscall:
ldil L%syscall_exit,%r2
be 0(%sr7,%r19)
ldo R%syscall_exit(%r2),%r2
.Lrt_sigreturn:
comib,<> 0,%r25,.Lin_syscall
ldil L%syscall_exit_rfi,%r2
be 0(%sr7,%r19)
ldo R%syscall_exit_rfi(%r2),%r2
/* Note! Because we are not running where we were linked, any
calls to functions external to this file must be indirect. To
be safe, we apply the opposite rule to functions within this
file, with local labels given to them to ensure correctness. */
.Lsyscall_nosys:
syscall_nosys:
ldil L%syscall_exit,%r1
be R%syscall_exit(%sr7,%r1)
ldo -ENOSYS(%r0),%r28 /* set errno */
/* Warning! This trace code is a virtual duplicate of the code above so be
* sure to maintain both! */
.Ltracesys:
tracesys:
/* Need to save more registers so the debugger can see where we
* are. This saves only the lower 8 bits of PSW, so that the C
* bit is still clear on syscalls, and the D bit is set if this
* full register save path has been executed. We check the D
* bit on syscall_return_rfi to determine which registers to
* restore. An interrupt results in a full PSW saved with the
* C bit set, a non-ptraced syscall entry results in C and D clear
* in the saved PSW.
*/
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
ssm 0,%r2
STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */
mfsp %sr0,%r2
STREG %r2,TASK_PT_SR0(%r1)
mfsp %sr1,%r2
STREG %r2,TASK_PT_SR1(%r1)
mfsp %sr2,%r2
STREG %r2,TASK_PT_SR2(%r1)
mfsp %sr3,%r2
STREG %r2,TASK_PT_SR3(%r1)
STREG %r2,TASK_PT_SR4(%r1)
STREG %r2,TASK_PT_SR5(%r1)
STREG %r2,TASK_PT_SR6(%r1)
STREG %r2,TASK_PT_SR7(%r1)
STREG %r2,TASK_PT_IASQ0(%r1)
STREG %r2,TASK_PT_IASQ1(%r1)
LDREG TASK_PT_GR31(%r1),%r2
STREG %r2,TASK_PT_IAOQ0(%r1)
ldo 4(%r2),%r2
STREG %r2,TASK_PT_IAOQ1(%r1)
ldo TASK_REGS(%r1),%r2
/* reg_save %r2 */
STREG %r3,PT_GR3(%r2)
STREG %r4,PT_GR4(%r2)
STREG %r5,PT_GR5(%r2)
STREG %r6,PT_GR6(%r2)
STREG %r7,PT_GR7(%r2)
STREG %r8,PT_GR8(%r2)
STREG %r9,PT_GR9(%r2)
STREG %r10,PT_GR10(%r2)
STREG %r11,PT_GR11(%r2)
STREG %r12,PT_GR12(%r2)
STREG %r13,PT_GR13(%r2)
STREG %r14,PT_GR14(%r2)
STREG %r15,PT_GR15(%r2)
STREG %r16,PT_GR16(%r2)
STREG %r17,PT_GR17(%r2)
STREG %r18,PT_GR18(%r2)
/* Finished saving things for the debugger */
ldil L%syscall_trace,%r1
ldil L%tracesys_next,%r2
be R%syscall_trace(%sr7,%r1)
ldo R%tracesys_next(%r2),%r2
tracesys_next:
ldil L%sys_call_table,%r1
ldo R%sys_call_table(%r1), %r19
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
LDREG TASK_PT_GR20(%r1), %r20
LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
#ifdef __LP64__
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
ldo -16(%r30),%r29 /* Reference param save area */
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
* when not in_syscall, then we want to return via syscall_exit_rfi,
* not syscall_exit. Signal no. in r20, in_syscall in r25 (see
* trampoline code in signal.c).
*/
ldi __NR_rt_sigreturn,%r2
comb,= %r2,%r20,.Ltrace_rt_sigreturn
.Ltrace_in_syscall:
ldil L%tracesys_exit,%r2
be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2
/* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */
tracesys_exit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
#ifdef __LP64__
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl syscall_trace, %r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */
ldil L%syscall_exit,%r1
be,n R%syscall_exit(%sr7,%r1)
.Ltrace_rt_sigreturn:
comib,<> 0,%r25,.Ltrace_in_syscall
ldil L%tracesys_sigexit,%r2
be 0(%sr7,%r19)
ldo R%tracesys_sigexit(%r2),%r2
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG 0(%r1), %r1
#ifdef __LP64__
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl syscall_trace, %r2
nop
ldil L%syscall_exit_rfi,%r1
be,n R%syscall_exit_rfi(%sr7,%r1)
/*********************************************************
Light-weight-syscall code
r20 - lws number
r26,r25,r24,r23,r22 - Input registers
r28 - Function return register
r21 - Error code.
Scratch: Any of the above that aren't currently
being used, including r1.
Return pointer: r31 (Not usable)
Error codes returned by entry path:
ENOSYS - r20 was an invalid LWS number.
*********************************************************/
lws_start:
/* Gate and ensure we return to userspace */
gate .+8, %r0
depi 3, 31, 2, %r31 /* Ensure we return to userspace */
#ifdef __LP64__
/* FIXME: If we are a 64-bit kernel just
* turn this on unconditionally.
*/
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
/* Clip LWS number to a 32-bit value always */
depdi 0, 31, 32, %r20
#endif
/* Is the lws entry number valid? */
comiclr,>>= __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* WARNING: Trashing sr2 and sr3 */
mfsp %sr7,%r1 /* get userspace into sr3 */
mtsp %r1,%sr3
mtsp %r0,%sr2 /* get kernel space into sr2 */
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */
/* Jump to lws, lws table pointers already relocated */
be,n 0(%sr2,%r21)
lws_exit_nosys:
ldo -ENOSYS(%r0),%r21 /* set errno */
/* Fall through: Return to userspace */
lws_exit:
#ifdef __LP64__
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
* of sp. Extract it and reset W if it is zero */
extrd,u,*<> %r30,63,1,%r1
rsm PSW_SM_W, %r0
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
be,n 0(%sr3, %r31)
/***************************************************
Implementing CAS as an atomic operation:
%r26 - Address to examine
%r25 - Old value to check (old)
%r24 - New value to set (new)
%r28 - Return prev through this register.
%r21 - Kernel error code
If debugging is DISabled:
%r21 has the following meanings:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
If debugging is enabled:
EDEADLOCK - CAS called recursively.
EAGAIN && r28 == 1 - CAS is busy. Lock contended.
EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
EFAULT - Read or write failed.
Scratch: r20, r28, r1
****************************************************/
/* Do not enable LWS debugging */
#define ENABLE_LWS_DEBUG 0
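/* Hedged userspace calling sketch (assumed ABI, following the
 * register comments in the lws block above): %r26/%r25/%r24 hold
 * addr/old/new, the lws index goes in %r20, and the caller branches
 * to the fixed gateway entry at 0xb0:
 *
 *	ble	0xb0(%sr2, %r0)
 *	ldi	0, %r20		; index 0 = 32-bit CAS (delay slot)
 *
 * On return %r28 holds the previous value and %r21 any error code.
 */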
/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef __LP64__
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
* implement having 64-bit input registers
*/
b,n lws_exit_nosys
#endif
/* ELF32 Process entry path */
lws_compare_and_swap32:
#ifdef __LP64__
/* Clip all the input registers */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
#endif
lws_compare_and_swap:
#ifdef CONFIG_SMP
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
/* Extract four bits from r26 and hash lock (Bits 4-7) */
extru %r26, 27, 4, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
and add to the lock table offset. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
# if ENABLE_LWS_DEBUG
/*
DEBUG, check for deadlock!
If the thread register values are the same
then we were the one that locked it last and
this is a recursive call that will deadlock.
We *must* give up this call and fail.
*/
ldw 4(%sr2,%r20), %r28 /* Load thread register */
mfctl %cr27, %r21 /* Get current thread register */
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
b lws_exit /* Return error! */
ldo -EDEADLOCK(%r0), %r21
cas_lock:
cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
ldo 1(%r0), %r28 /* 1st case */
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
ldcw 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
#endif
/* CONFIG_SMP */
/*
prev = *addr;
if ( prev == old )
*addr = new;
return prev;
*/
/* NOTES:
This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page
so this process is never scheduled off
or is ever sent any signal of any sort,
thus it is wholly atomic from userspace's
perspective.
*/
cas_action:
#if defined(CONFIG_SMP) && ENABLE_LWS_DEBUG
/* DEBUG */
mfctl %cr27, %r1
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
1: ldw 0(%sr3,%r26), %r28
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%sr3,%r26)
#ifdef CONFIG_SMP
/* Free lock */
stw %r20, 0(%sr2,%r20)
# if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
# endif
#endif
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
3:
/* Error occurred on load or store */
#ifdef CONFIG_SMP
/* Free lock */
stw %r20, 0(%sr2,%r20)
# if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
# endif
#endif
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
nop
nop
nop
/* Two exception table entries, one for the load,
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
.section __ex_table,"aw"
#ifdef __LP64__
/* Pad the address calculation */
.word 0,(2b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
#else
.word (2b - linux_gateway_page)
.word (3b - linux_gateway_page)
#endif
.previous
.section __ex_table,"aw"
#ifdef __LP64__
/* Pad the address calculation */
.word 0,(1b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
#else
.word (1b - linux_gateway_page)
.word (3b - linux_gateway_page)
#endif
.previous
end_compare_and_swap:
/* Make sure nothing else is placed on this page */
.align 4096
.export end_linux_gateway_page
end_linux_gateway_page:
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
#ifdef __LP64__
/* FIXME: The code will always be on the gateway page
and thus it will be on the first 4k, the
assembler seems to think that the final
subtraction result is only a word in
length, so we pad the value.
*/
#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
#else
#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
#endif
.align 4096
/* Light-weight-syscall table */
/* Start of lws table. */
.export lws_table
.Llws_table:
lws_table:
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
/* End of lws table */
.align 4096
.export sys_call_table
.Lsys_call_table:
sys_call_table:
#include "syscall_table.S"
#ifdef __LP64__
.align 4096
.export sys_call_table64
.Lsys_call_table64:
sys_call_table64:
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
#endif
#ifdef CONFIG_SMP
/*
All light-weight-syscall atomic operations
will use this set of locks
*/
.section .data
.align 4096
.export lws_lock_start
.Llws_lock_start:
lws_lock_start:
/* lws locks */
.align 16
.rept 16
/* Keep locks aligned at 16-bytes */
.word 1
.word 0
.word 0
.word 0
.endr
.previous
#endif
/* CONFIG_SMP for lws_lock_start */
.end

View File

@@ -0,0 +1,372 @@
/* System Call Table
*
* Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef ENTRY_SAME
#undef ENTRY_DIFF
#undef ENTRY_UHOH
#undef ENTRY_COMP
#undef ENTRY_OURS
#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT)
/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
* narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
* implementation is required on wide palinux. Use ENTRY_COMP where
* the compatibility layer has a useful 32-bit implementation.
*/
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys32_##_name_
#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
#define ENTRY_OURS(_name_) .dword parisc_##_name_
#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT)
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys_##_name_
#define ENTRY_UHOH(_name_) .dword sys_##_name_
#define ENTRY_OURS(_name_) .dword sys_##_name_
#define ENTRY_COMP(_name_) .dword sys_##_name_
#else
#define ENTRY_SAME(_name_) .word sys_##_name_
#define ENTRY_DIFF(_name_) .word sys_##_name_
#define ENTRY_UHOH(_name_) .word sys_##_name_
#define ENTRY_OURS(_name_) .word parisc_##_name_
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
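/* Example expansions, first (32-bit-facing) table on a wide kernel:
 *
 *	ENTRY_DIFF(lseek)  ->  .dword sys32_lseek
 *	ENTRY_COMP(ioctl)  ->  .dword compat_sys_ioctl
 *
 * In the 64-bit table and on narrow kernels both fall back to the
 * native sys_* entry.
 */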
ENTRY_SAME(restart_syscall) /* 0 */
ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
ENTRY_SAME(open) /* 5 */
ENTRY_SAME(close)
ENTRY_SAME(waitpid)
ENTRY_SAME(creat)
ENTRY_SAME(link)
ENTRY_SAME(unlink) /* 10 */
ENTRY_DIFF(execve_wrapper)
ENTRY_SAME(chdir)
/* See comments in kernel/time.c!!! Maybe we don't need this? */
ENTRY_COMP(time)
ENTRY_SAME(mknod)
ENTRY_SAME(chmod) /* 15 */
ENTRY_SAME(lchown)
ENTRY_SAME(socket)
/* struct stat is MAYBE identical wide and narrow ?? */
ENTRY_COMP(newstat)
ENTRY_DIFF(lseek)
ENTRY_SAME(getpid) /* 20 */
/* the 'void * data' parameter may need re-packing in wide */
ENTRY_COMP(mount)
/* concerned about struct sockaddr in wide/narrow */
/* ---> I think sockaddr is OK unless the compiler packs the struct */
/* differently to align the char array */
ENTRY_SAME(bind)
ENTRY_SAME(setuid)
ENTRY_SAME(getuid)
ENTRY_COMP(stime) /* 25 */
ENTRY_SAME(ptrace)
ENTRY_SAME(alarm)
/* see stat comment */
ENTRY_COMP(newfstat)
ENTRY_SAME(pause)
/* struct utimbuf uses time_t which might vary */
ENTRY_COMP(utime) /* 30 */
/* struct sockaddr... */
ENTRY_SAME(connect)
ENTRY_SAME(listen)
ENTRY_SAME(access)
ENTRY_SAME(nice)
/* struct sockaddr... */
ENTRY_SAME(accept) /* 35 */
ENTRY_SAME(sync)
ENTRY_SAME(kill)
ENTRY_SAME(rename)
ENTRY_SAME(mkdir)
ENTRY_SAME(rmdir) /* 40 */
ENTRY_SAME(dup)
ENTRY_SAME(pipe)
ENTRY_COMP(times)
/* struct sockaddr... */
ENTRY_SAME(getsockname)
/* it seems possible brk() could return a >4G pointer... */
ENTRY_SAME(brk) /* 45 */
ENTRY_SAME(setgid)
ENTRY_SAME(getgid)
ENTRY_SAME(signal)
ENTRY_SAME(geteuid)
ENTRY_SAME(getegid) /* 50 */
ENTRY_SAME(acct)
ENTRY_SAME(umount)
/* struct sockaddr... */
ENTRY_SAME(getpeername)
ENTRY_COMP(ioctl)
ENTRY_COMP(fcntl) /* 55 */
ENTRY_SAME(socketpair)
ENTRY_SAME(setpgid)
ENTRY_SAME(send)
ENTRY_SAME(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
ENTRY_SAME(ustat)
ENTRY_SAME(dup2)
ENTRY_SAME(getppid)
ENTRY_SAME(getpgrp) /* 65 */
ENTRY_SAME(setsid)
ENTRY_SAME(pivot_root)
/* I don't like this */
ENTRY_UHOH(sgetmask)
ENTRY_UHOH(ssetmask)
ENTRY_SAME(setreuid) /* 70 */
ENTRY_SAME(setregid)
ENTRY_SAME(mincore)
ENTRY_COMP(sigpending)
ENTRY_SAME(sethostname)
/* Following 3 have linux-common-code structs containing longs -( */
ENTRY_COMP(setrlimit) /* 75 */
ENTRY_COMP(getrlimit)
ENTRY_COMP(getrusage)
/* struct timeval and timezone are maybe?? consistent wide and narrow */
ENTRY_DIFF(gettimeofday)
ENTRY_DIFF(settimeofday)
ENTRY_SAME(getgroups) /* 80 */
ENTRY_SAME(setgroups)
/* struct socketaddr... */
ENTRY_SAME(sendto)
ENTRY_SAME(symlink)
/* see stat comment */
ENTRY_COMP(newlstat)
ENTRY_SAME(readlink) /* 85 */
ENTRY_SAME(ni_syscall) /* was uselib */
ENTRY_SAME(swapon)
ENTRY_SAME(reboot)
ENTRY_SAME(mmap2)
ENTRY_SAME(mmap) /* 90 */
ENTRY_SAME(munmap)
ENTRY_SAME(truncate)
ENTRY_SAME(ftruncate)
ENTRY_SAME(fchmod)
ENTRY_SAME(fchown) /* 95 */
ENTRY_SAME(getpriority)
ENTRY_SAME(setpriority)
ENTRY_SAME(recv)
ENTRY_COMP(statfs)
ENTRY_COMP(fstatfs) /* 100 */
ENTRY_SAME(stat64)
ENTRY_SAME(ni_syscall) /* was socketcall */
ENTRY_SAME(syslog)
/* even though manpage says struct timeval contains longs, ours has
* time_t and suseconds_t -- both of which are safe wide/narrow */
ENTRY_COMP(setitimer)
ENTRY_COMP(getitimer) /* 105 */
ENTRY_SAME(capget)
ENTRY_SAME(capset)
ENTRY_OURS(pread64)
ENTRY_OURS(pwrite64)
ENTRY_SAME(getcwd) /* 110 */
ENTRY_SAME(vhangup)
ENTRY_SAME(fstat64)
ENTRY_SAME(vfork_wrapper)
/* struct rusage contains longs... */
ENTRY_COMP(wait4)
ENTRY_SAME(swapoff) /* 115 */
ENTRY_DIFF(sysinfo)
ENTRY_SAME(shutdown)
ENTRY_SAME(fsync)
ENTRY_SAME(madvise)
ENTRY_SAME(clone_wrapper) /* 120 */
ENTRY_SAME(setdomainname)
ENTRY_DIFF(sendfile)
/* struct sockaddr... */
ENTRY_SAME(recvfrom)
/* struct timex contains longs */
ENTRY_DIFF(adjtimex)
ENTRY_SAME(mprotect) /* 125 */
/* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
ENTRY_COMP(sigprocmask)
ENTRY_SAME(ni_syscall) /* create_module */
ENTRY_SAME(init_module)
ENTRY_SAME(delete_module)
ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */
/* time_t inside struct dqblk */
ENTRY_SAME(quotactl)
ENTRY_SAME(getpgid)
ENTRY_SAME(fchdir)
ENTRY_SAME(bdflush)
ENTRY_SAME(sysfs) /* 135 */
ENTRY_SAME(personality)
ENTRY_SAME(ni_syscall) /* for afs_syscall */
ENTRY_SAME(setfsuid)
ENTRY_SAME(setfsgid)
/* I think this might work */
ENTRY_SAME(llseek) /* 140 */
/* struct linux_dirent has longs, like 'unsigned long d_ino' which
* almost definitely should be 'ino_t d_ino' but it's too late now */
ENTRY_DIFF(getdents)
/* it is POSSIBLE that select will be OK because even though fd_set
* contains longs, the macros and sizes are clever. */
ENTRY_COMP(select)
ENTRY_SAME(flock)
ENTRY_SAME(msync)
/* struct iovec contains pointers */
ENTRY_COMP(readv) /* 145 */
ENTRY_COMP(writev)
ENTRY_SAME(getsid)
ENTRY_SAME(fdatasync)
/* struct __sysctl_args is a mess */
ENTRY_DIFF(sysctl)
ENTRY_SAME(mlock) /* 150 */
ENTRY_SAME(munlock)
ENTRY_SAME(mlockall)
ENTRY_SAME(munlockall)
/* struct sched_param is ok for now */
ENTRY_SAME(sched_setparam)
ENTRY_SAME(sched_getparam) /* 155 */
ENTRY_SAME(sched_setscheduler)
ENTRY_SAME(sched_getscheduler)
ENTRY_SAME(sched_yield)
ENTRY_SAME(sched_get_priority_max)
ENTRY_SAME(sched_get_priority_min) /* 160 */
/* These 2 would've worked if someone had defined struct timespec
* carefully, like timeval for example (which is about the same).
* Unfortunately it contains a long :-( */
ENTRY_DIFF(sched_rr_get_interval)
ENTRY_COMP(nanosleep)
ENTRY_SAME(mremap)
ENTRY_SAME(setresuid)
ENTRY_SAME(getresuid) /* 165 */
ENTRY_DIFF(sigaltstack_wrapper)
ENTRY_SAME(ni_syscall) /* query_module */
ENTRY_SAME(poll)
/* structs contain pointers and an in_addr... */
ENTRY_COMP(nfsservctl)
ENTRY_SAME(setresgid) /* 170 */
ENTRY_SAME(getresgid)
ENTRY_SAME(prctl)
/* signals need a careful review */
ENTRY_SAME(rt_sigreturn_wrapper)
ENTRY_DIFF(rt_sigaction)
ENTRY_DIFF(rt_sigprocmask) /* 175 */
ENTRY_DIFF(rt_sigpending)
ENTRY_COMP(rt_sigtimedwait)
/* even though the struct siginfo_t is different, it appears like
* all the paths use values which should be same wide and narrow.
* Also the struct is padded to 128 bytes which means we don't have
* to worry about faulting trying to copy in a larger 64-bit
* struct from a 32-bit user-space app.
*/
ENTRY_SAME(rt_sigqueueinfo)
ENTRY_SAME(rt_sigsuspend_wrapper) /* not really SAME -- see the code */
ENTRY_SAME(chown) /* 180 */
/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
ENTRY_COMP(setsockopt)
ENTRY_SAME(getsockopt)
ENTRY_COMP(sendmsg)
ENTRY_COMP(recvmsg)
ENTRY_SAME(semop) /* 185 */
ENTRY_SAME(semget)
ENTRY_DIFF(semctl)
ENTRY_DIFF(msgsnd)
ENTRY_DIFF(msgrcv)
ENTRY_SAME(msgget) /* 190 */
ENTRY_SAME(msgctl)
ENTRY_SAME(shmat_wrapper)
ENTRY_SAME(shmdt)
ENTRY_SAME(shmget)
ENTRY_SAME(shmctl) /* 195 */
ENTRY_SAME(ni_syscall) /* streams1 */
ENTRY_SAME(ni_syscall) /* streams2 */
ENTRY_SAME(lstat64)
ENTRY_OURS(truncate64)
ENTRY_OURS(ftruncate64) /* 200 */
ENTRY_SAME(getdents64)
ENTRY_COMP(fcntl64)
ENTRY_SAME(ni_syscall) /* attrctl -- dead */
ENTRY_SAME(ni_syscall) /* acl_get -- dead */
ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */
ENTRY_SAME(gettid)
ENTRY_OURS(readahead)
ENTRY_SAME(tkill)
ENTRY_SAME(sendfile64)
ENTRY_COMP(futex) /* 210 */
ENTRY_COMP(sched_setaffinity)
ENTRY_COMP(sched_getaffinity)
ENTRY_SAME(ni_syscall) /* set_thread_area */
ENTRY_SAME(ni_syscall) /* get_thread_area */
ENTRY_SAME(io_setup) /* 215 */
ENTRY_SAME(io_destroy)
ENTRY_SAME(io_getevents)
ENTRY_SAME(io_submit)
ENTRY_SAME(io_cancel)
ENTRY_SAME(alloc_hugepages) /* 220 */
ENTRY_SAME(free_hugepages)
ENTRY_SAME(exit_group)
ENTRY_DIFF(lookup_dcookie)
ENTRY_SAME(epoll_create)
ENTRY_SAME(epoll_ctl) /* 225 */
ENTRY_SAME(epoll_wait)
ENTRY_SAME(remap_file_pages)
ENTRY_SAME(semtimedop)
ENTRY_SAME(mq_open)
ENTRY_SAME(mq_unlink) /* 230 */
ENTRY_SAME(mq_timedsend)
ENTRY_SAME(mq_timedreceive)
ENTRY_SAME(mq_notify)
ENTRY_SAME(mq_getsetattr)
ENTRY_COMP(waitid) /* 235 */
ENTRY_OURS(fadvise64_64)
ENTRY_SAME(set_tid_address)
ENTRY_SAME(setxattr)
ENTRY_SAME(lsetxattr)
ENTRY_SAME(fsetxattr) /* 240 */
ENTRY_SAME(getxattr)
ENTRY_SAME(lgetxattr)
ENTRY_SAME(fgetxattr)
ENTRY_SAME(listxattr)
ENTRY_SAME(llistxattr) /* 245 */
ENTRY_SAME(flistxattr)
ENTRY_SAME(removexattr)
ENTRY_SAME(lremovexattr)
ENTRY_SAME(fremovexattr)
ENTRY_COMP(timer_create) /* 250 */
ENTRY_COMP(timer_settime)
ENTRY_COMP(timer_gettime)
ENTRY_SAME(timer_getoverrun)
ENTRY_SAME(timer_delete)
ENTRY_COMP(clock_settime) /* 255 */
ENTRY_COMP(clock_gettime)
ENTRY_COMP(clock_getres)
ENTRY_COMP(clock_nanosleep)
ENTRY_SAME(tgkill)
ENTRY_COMP(mbind) /* 260 */
ENTRY_COMP(get_mempolicy)
ENTRY_COMP(set_mempolicy)
/* Nothing yet */

243
arch/parisc/kernel/time.c Normal file
View File

@@ -0,0 +1,243 @@
/*
* linux/arch/parisc/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
* Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
*
* 1994-07-02 Alan Modra
* fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
* 1998-12-20 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <linux/timex.h>
u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;
static long clocktick; /* timer cycles per tick */
static long halftick;
#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
long now;
long next_tick;
int nticks;
int cpu = smp_processor_id();
profile_tick(CPU_PROFILING, regs);
now = mfctl(16);
/* initialize next_tick to time at last clocktick */
next_tick = cpu_data[cpu].it_value;
/* since time passes between the interrupt and the mfctl()
* above, it is never true that last_tick + clocktick == now. If we
* never miss a clocktick, we could set next_tick = last_tick + clocktick
* but maybe we'll miss ticks, hence the loop.
*
* Variables are *signed*.
*/
nticks = 0;
while((next_tick - now) < halftick) {
next_tick += clocktick;
nticks++;
}
mtctl(next_tick, 16);
cpu_data[cpu].it_value = next_tick;
while (nticks--) {
#ifdef CONFIG_SMP
smp_do_timer(regs);
#else
update_process_times(user_mode(regs));
#endif
if (cpu == 0) {
write_seqlock(&xtime_lock);
do_timer(regs);
write_sequnlock(&xtime_lock);
}
}
#ifdef CONFIG_CHASSIS_LCD_LED
/* Only schedule the led tasklet on cpu 0, and only if it
* is enabled.
*/
if (cpu == 0 && !atomic_read(&led_tasklet.count))
tasklet_schedule(&led_tasklet);
#endif
/* check soft power switch status */
if (cpu == 0 && !atomic_read(&power_tasklet.count))
tasklet_schedule(&power_tasklet);
return IRQ_HANDLED;
}
/*** converted from ia64 ***/
/*
* Return the number of micro-seconds that elapsed since the last
* update to wall time (aka xtime aka wall_jiffies). The xtime_lock
* must be at least read-locked when calling this routine.
*/
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
/*
* FIXME: This won't work on smp because jiffies are updated by cpu 0.
* Once parisc-linux learns the cr16 difference between processors,
* this could be made to work.
*/
long last_tick;
long elapsed_cycles;
/* it_value is the intended time of the next tick */
last_tick = cpu_data[smp_processor_id()].it_value;
/* Subtract one tick and account for possible difference between
* when we expected the tick and when it actually arrived.
* (aka wall vs real)
*/
last_tick -= clocktick * (jiffies - wall_jiffies + 1);
elapsed_cycles = mfctl(16) - last_tick;
/* the precision of this math could be improved */
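/* e.g. with a 250 MHz cr16, PAGE0->mem_10msec == 2500000, so
* mem_10msec/10000 == 250 cycles per microsecond and 5000 elapsed
* cycles work out to 20 usec */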
return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
return 0;
#endif
}
void
do_gettimeofday (struct timeval *tv)
{
unsigned long flags, seq, usec, sec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = gettimeoffset();
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
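/* the loop above retries until xtime and the cr16 offset were
* sampled without an intervening writer, i.e. they form a
* consistent pair */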
while (usec >= 1000000) {
usec -= 1000000;
++sec;
}
tv->tv_sec = sec;
tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);
int
do_settimeofday (struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
{
/*
* This is revolting. We need to set "xtime"
* correctly. However, the value in this location is
* the value at the most recent update of wall time.
* Discover what correction gettimeofday would have
* done, and then undo it!
*/
nsec -= gettimeoffset() * 1000;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/*
* XXX: We can do better than this.
* Returns nanoseconds
*/
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
void __init time_init(void)
{
unsigned long next_tick;
static struct pdc_tod tod_data;
clocktick = (100 * PAGE0->mem_10msec) / HZ;
halftick = clocktick / 2;
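/* mem_10msec is cr16 cycles per 10ms, so 100 * mem_10msec is cycles
* per second and dividing by HZ gives cycles per jiffy; e.g. a
* 250 MHz cr16 with HZ == 100 yields clocktick == 2500000 */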
/* Setup clock interrupt timing */
next_tick = mfctl(16);
next_tick += clocktick;
cpu_data[smp_processor_id()].it_value = next_tick;
/* kick off Itimer (CR16) */
mtctl(next_tick, 16);
if(pdc_tod_read(&tod_data) == 0) {
write_seqlock_irq(&xtime_lock);
xtime.tv_sec = tod_data.tod_sec;
xtime.tv_nsec = tod_data.tod_usec * 1000;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
write_sequnlock_irq(&xtime_lock);
} else {
printk(KERN_ERR "Error reading tod clock\n");
xtime.tv_sec = 0;
xtime.tv_nsec = 0;
}
}

37
arch/parisc/kernel/topology.c Normal file
View File

@@ -0,0 +1,37 @@
/*
* arch/parisc/kernel/topology.c - Populate driverfs with topology information
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
static struct cpu cpu_devices[NR_CPUS];
static int __init topology_init(void)
{
struct node *parent = NULL;
int num;
for_each_present_cpu(num) {
register_cpu(&cpu_devices[num], num, parent);
}
return 0;
}
subsys_initcall(topology_init);

834
arch/parisc/kernel/traps.c Normal file
View File

@@ -0,0 +1,834 @@
/*
* linux/arch/parisc/traps.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
/* dumped to the console via printk) */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
while (mask != 0) {
*buf++ = (mask & x ? '1' : '0');
mask >>= 1;
}
*buf = '\0';
return nbits;
}
#ifdef __LP64__
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
void show_regs(struct pt_regs *regs)
{
int i;
char buf[128], *p;
char *level;
unsigned long cr30;
unsigned long cr31;
level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
printk("%s\n", level); /* don't want to have that pretty register dump messed up */
printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
printbinary(buf, regs->gr[0], 32);
printk("%sPSW: %s %s\n", level, buf, print_tainted());
for (i = 0; i < 32; i += 4) {
int j;
p = buf;
p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
for (j = 0; j < 4; j++) {
p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
}
printk("%s\n", buf);
}
for (i = 0; i < 8; i += 4) {
int j;
p = buf;
p += sprintf(p, "%ssr%d-%d ", level, i, i + 3);
for (j = 0; j < 4; j++) {
p += sprintf(p, " " RFMT, regs->sr[i + j]);
}
printk("%s\n", buf);
}
#if RIDICULOUSLY_VERBOSE
for (i = 0; i < 32; i += 2)
printk("%sFR%02d : %016lx FR%2d : %016lx", level, i,
regs->fr[i], i+1, regs->fr[i+1]);
#endif
cr30 = mfctl(30);
cr31 = mfctl(31);
printk("%s\n", level);
printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
level, regs->iir, regs->isr, regs->ior);
printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
level, current_thread_info()->cpu, cr30, cr31);
printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
printk(level);
print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
printk(level);
print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
printk(level);
print_symbol(" RP(r2): %s\n", regs->gr[2]);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
static void do_show_stack(struct unwind_frame_info *info)
{
int i = 1;
printk("Backtrace:\n");
while (i <= 16) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
printk(" [<" RFMT ">] ", info->ip);
#ifdef CONFIG_KALLSYMS
print_symbol("%s\n", info->ip);
#else
if ((i & 0x03) == 0)
printk("\n");
#endif
i++;
}
}
printk("\n");
}
void show_stack(struct task_struct *task, unsigned long *s)
{
struct unwind_frame_info info;
if (!task) {
unsigned long sp;
struct pt_regs *r;
HERE:
asm volatile ("copy %%r30, %0" : "=r"(sp));
r = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
if (!r)
return;
memset(r, 0, sizeof(struct pt_regs));
r->iaoq[0] = (unsigned long)&&HERE;
r->gr[2] = (unsigned long)__builtin_return_address(0);
r->gr[30] = sp;
unwind_frame_init(&info, current, r);
kfree(r);
} else {
unwind_frame_init_from_blocked_task(&info, task);
}
do_show_stack(&info);
}
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs)) {
if (err == 0)
return; /* STFU */
printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
current->comm, current->pid, str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
/* XXX for debugging only */
show_regs(regs);
#endif
return;
}
oops_in_progress = 1;
/* Amuse the user in a SPARC fashion */
printk(
" _______________________________ \n"
" < Your System ate a SPARC! Gah! >\n"
" ------------------------------- \n"
" \\ ^__^\n"
" \\ (xx)\\_______\n"
" (__)\\ )\\/\\\n"
" U ||----w |\n"
" || ||\n");
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* maybe the kernel hasn't booted very far yet and hasn't been able
* to initialize the serial or STI console. In that case we should
* re-enable the pdc console, so that the user will be able to
* identify the problem. */
if (!console_drivers)
pdc_console_restart();
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, current->pid, str, err);
show_regs(regs);
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
local_irq_enable();
while (1);
}
current->thread.flags |= PARISC_KERNEL_DEATH;
do_exit(SIGSEGV);
}
int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
return syscall(regs);
}
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
void handle_gdb_break(struct pt_regs *regs, int wot)
{
struct siginfo si;
si.si_code = wot;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
si.si_errno = 0;
force_sig_info(SIGTRAP, &si, current);
}
void handle_break(unsigned iir, struct pt_regs *regs)
{
struct siginfo si;
switch(iir) {
case 0x00:
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
current->pid, current->comm);
#endif
die_if_kernel("Breakpoint", regs, 0);
#ifdef PRINT_USER_FAULTS
show_regs(regs);
#endif
si.si_code = TRAP_BRKPT;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
force_sig_info(SIGTRAP, &si, current);
break;
case GDB_BREAK_INSN:
die_if_kernel("Breakpoint", regs, 0);
handle_gdb_break(regs, TRAP_BRKPT);
break;
default:
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
iir, current->pid, current->comm);
show_regs(regs);
#endif
si.si_signo = SIGTRAP;
si.si_code = TRAP_BRKPT;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
force_sig_info(SIGTRAP, &si, current);
return;
}
}
int handle_toc(void)
{
printk(KERN_CRIT "TOC call.\n");
return 0;
}
static void default_trap(int code, struct pt_regs *regs)
{
printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
show_regs(regs);
}
void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
register int i;
extern unsigned int hpmc_pim_data[];
struct pdc_hpmc_pim_11 *pim_narrow;
struct pdc_hpmc_pim_20 *pim_wide;
if (boot_cpu_data.cpu_type >= pcxu) {
pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
/*
* Note: The following code will probably generate a
* bunch of truncation error warnings from the compiler.
* Could be handled with an ifdef, but perhaps there
* is a better way.
*/
regs->gr[0] = pim_wide->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_wide->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_wide->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_wide->sr[i];
regs->iasq[0] = pim_wide->cr[17];
regs->iasq[1] = pim_wide->iasq_back;
regs->iaoq[0] = pim_wide->cr[18];
regs->iaoq[1] = pim_wide->iaoq_back;
regs->sar = pim_wide->cr[11];
regs->iir = pim_wide->cr[19];
regs->isr = pim_wide->cr[20];
regs->ior = pim_wide->cr[21];
}
else {
pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
regs->gr[0] = pim_narrow->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_narrow->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_narrow->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_narrow->sr[i];
regs->iasq[0] = pim_narrow->cr[17];
regs->iasq[1] = pim_narrow->iasq_back;
regs->iaoq[0] = pim_narrow->cr[18];
regs->iaoq[1] = pim_narrow->iaoq_back;
regs->sar = pim_narrow->cr[11];
regs->iir = pim_narrow->cr[19];
regs->isr = pim_narrow->cr[20];
regs->ior = pim_narrow->cr[21];
}
/*
* The following fields only have meaning if we came through
* another path. So just zero them here.
*/
regs->ksp = 0;
regs->kpc = 0;
regs->orig_r28 = 0;
}
/*
* This routine is called as a last resort when everything else
* has gone clearly wrong. We get called for faults in kernel space,
* and HPMC's.
*/
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
static DEFINE_SPINLOCK(terminate_lock);
oops_in_progress = 1;
set_eiem(0);
local_irq_disable();
spin_lock(&terminate_lock);
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* restart pdc console if necessary */
if (!console_drivers)
pdc_console_restart();
/* Not all paths will gutter the processor... */
switch(code){
case 1:
transfer_pim_to_trap_frame(regs);
break;
default:
/* Fall through */
break;
}
{
/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
struct unwind_frame_info info;
unwind_frame_init(&info, current, regs);
do_show_stack(&info);
}
printk("\n");
printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
msg, code, regs, offset);
show_regs(regs);
spin_unlock(&terminate_lock);
/* put soft power button back under hardware control;
* if the user had pressed it once at any time, the
* system will shut down immediately right here. */
pdc_soft_power_button(0);
/* Call kernel panic() so reboot timeouts work properly
* FIXME: This function should be on the list of
* panic notifiers, and we should call panic
* directly from the location that we wish.
* e.g. We should not call panic from
* parisc_terminate, but rather the other way around.
* This hack works, prints the panic message twice,
* and it enables reboot timers!
*/
panic(msg);
}
void handle_interruption(int code, struct pt_regs *regs)
{
unsigned long fault_address = 0;
unsigned long fault_space = 0;
struct siginfo si;
if (code == 1)
pdc_console_restart(); /* switch back to pdc if HPMC */
else
local_irq_enable();
/* Security check:
* If the priority level is still user, and the
* faulting space is not equal to the active space
* then the user is attempting something in a space
* that does not belong to them. Kill the process.
*
* This is normally the situation when the user
* attempts to jump into the kernel space at the
* wrong offset, be it at the gateway page or a
* random location.
*
* We cannot normally signal the process because it
* could *be* on the gateway page, and processes
* executing on the gateway page can't have signals
* delivered.
*
* We merely readjust the address into the user's
* space, at a destination address of zero, and
* allow processing to continue.
*/
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
regs->iaoq[0] = 0 | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
return;
}
#if 0
printk(KERN_CRIT "Interruption # %d\n", code);
#endif
switch(code) {
case 1:
/* High-priority machine check (HPMC) */
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
parisc_terminate("High Priority Machine Check (HPMC)",
regs, code, 0);
/* NOT REACHED */
case 2:
/* Power failure interrupt */
printk(KERN_CRIT "Power failure interrupt !\n");
return;
case 3:
/* Recovery counter trap */
regs->gr[0] &= ~PSW_R;
if (user_space(regs))
handle_gdb_break(regs, TRAP_TRACE);
/* else this must be the start of a syscall - just let it run */
return;
case 5:
/* Low-priority machine check */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
flush_all_caches();
cpu_lpmc(5, regs);
return;
case 6:
/* Instruction TLB miss fault/Instruction page fault */
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
break;
case 8:
/* Illegal instruction trap */
die_if_kernel("Illegal instruction", regs, code);
si.si_code = ILL_ILLOPC;
goto give_sigill;
case 9:
/* Break instruction trap */
handle_break(regs->iir,regs);
return;
case 10:
/* Privileged operation trap */
die_if_kernel("Privileged operation", regs, code);
si.si_code = ILL_PRVOPC;
goto give_sigill;
case 11:
/* Privileged register trap */
if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
/* This is a MFCTL cr26/cr27 to gr instruction.
* PCXS traps on this, so we need to emulate it.
*/
if (regs->iir & 0x00200000)
regs->gr[regs->iir & 0x1f] = mfctl(27);
else
regs->gr[regs->iir & 0x1f] = mfctl(26);
regs->iaoq[0] = regs->iaoq[1];
regs->iaoq[1] += 4;
regs->iasq[0] = regs->iasq[1];
return;
}
die_if_kernel("Privileged register usage", regs, code);
si.si_code = ILL_PRVREG;
give_sigill:
si.si_signo = SIGILL;
si.si_errno = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGILL, &si, current);
return;
case 12:
/* Overflow Trap, let the userland signal handler do the cleanup */
si.si_signo = SIGFPE;
si.si_code = FPE_INTOVF;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
case 13:
/* Conditional Trap
The condition succeeds in an instruction which traps
on condition */
if(user_mode(regs)){
si.si_signo = SIGFPE;
/* Set to zero, and let the userspace app figure it out from
the insn pointed to by si_addr */
si.si_code = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
}
/* The kernel doesn't want to handle condition codes */
break;
case 14:
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
handle_fpe(regs);
return;
case 15:
/* Data TLB miss fault/Data page fault */
/* Fall through */
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
/* Fall through */
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
Still need to add slow path emulation code here!
If the insn used a non-shadow register, then the tlb
handlers could not have their side-effect (e.g. probe
writing to a target register) emulated since rfir would
erase the changes to said register. Instead we have to
setup everything, call this function we are in, and emulate
by hand. Technically we need to emulate:
fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
*/
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 18:
/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
/* Check for unaligned access */
if (check_unaligned(regs)) {
handle_unaligned(regs);
return;
}
/* Fall Through */
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
/* fall thru */
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
return;
case 25:
/* Taken branch trap */
regs->gr[0] &= ~PSW_T;
if (user_space(regs))
handle_gdb_break(regs, TRAP_BRANCH);
/* else this must be the start of a syscall - just let it
* run.
*/
return;
case 7:
/* Instruction access rights */
/* PCXL: Instruction memory protection trap */
/*
* This could be caused by either: 1) a process attempting
* to execute within a vma that does not have execute
* permission, or 2) an access rights violation caused by a
* flush only translation set up by ptep_get_and_clear().
* So we check the vma permissions to differentiate the two.
* If the vma indicates we have execute permission, then
* the cause is the latter one. In this case, we need to
* call do_page_fault() to fix the problem.
*/
if (user_mode(regs)) {
struct vm_area_struct *vma;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
up_read(&current->mm->mmap_sem);
break; /* call do_page_fault() */
}
up_read(&current->mm->mmap_sem);
}
/* Fall Through */
case 27:
/* Data memory protection ID trap */
die_if_kernel("Protection id trap", regs, code);
si.si_code = SEGV_MAPERR;
si.si_signo = SIGSEGV;
si.si_errno = 0;
if (code == 7)
si.si_addr = (void __user *) regs->iaoq[0];
else
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGSEGV, &si, current);
return;
case 28:
/* Unaligned data reference trap */
handle_unaligned(regs);
return;
default:
if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
current->pid, current->comm);
show_regs(regs);
#endif
/* SIGBUS, for lack of a better one. */
si.si_signo = SIGBUS;
si.si_code = BUS_OBJERR;
si.si_errno = 0;
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGBUS, &si, current);
return;
}
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Unexpected interruption", regs, code, 0);
/* NOT REACHED */
}
if (user_mode(regs)) {
if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
if (fault_space == 0)
printk(KERN_DEBUG "User Fault on Kernel Space ");
else
printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
code);
printk("pid=%d command='%s'\n", current->pid, current->comm);
show_regs(regs);
#endif
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGSEGV, &si, current);
return;
}
}
else {
/*
* The kernel should never fault on its own address space.
*/
if (fault_space == 0)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}
}
do_page_fault(regs, code, fault_address);
}
int __init check_ivt(void *iva)
{
int i;
u32 check = 0;
u32 *ivap;
u32 *hpmcp;
u32 length;
extern void os_hpmc(void);
extern void os_hpmc_end(void);
if (strcmp((char *)iva, "cows can fly"))
return -1;
ivap = (u32 *)iva;
for (i = 0; i < 8; i++)
*ivap++ = 0;
/* Compute Checksum for HPMC handler */
length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
for (i=0; i<length/4; i++)
check += *hpmcp++;
for (i=0; i<8; i++)
check += ivap[i];
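/* ivap[5] gets the negated sum so that the eight IVA header words
* plus the HPMC handler words checksum to zero; firmware can then
* verify the handler before branching to it */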
ivap[5] = -check;
return 0;
}
#ifndef __LP64__
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;
void __init trap_init(void)
{
void *iva;
if (boot_cpu_data.cpu_type >= pcxu)
iva = (void *) &fault_vector_20;
else
#ifdef __LP64__
panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
iva = (void *) &fault_vector_11;
#endif
if (check_ivt(iva))
panic("IVT invalid");
}

816
arch/parisc/kernel/unaligned.c Normal file
View File

@@ -0,0 +1,816 @@
/*
* Unaligned memory access handler
*
* Copyright (C) 2001 Randolph Chung <tausq@debian.org>
* Significantly tweaked by LaMont Jones <lamont@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/uaccess.h>
/* #define DEBUG_UNALIGNED 1 */
#ifdef DEBUG_UNALIGNED
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
#ifdef __LP64__
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FIXUP_BRANCH(lbl) \
"\tldil L%%" #lbl ", %%r1\n" \
"\tldo R%%" #lbl "(%%r1), %%r1\n" \
"\tbv,n %%r0(%%r1)\n"
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
#define OPCODE2(a,b) ((a)<<26|(b)<<1)
#define OPCODE3(a,b) ((a)<<26|(b)<<2)
#define OPCODE4(a) ((a)<<26)
#define OPCODE1_MASK OPCODE1(0x3f,1,0xf)
#define OPCODE2_MASK OPCODE2(0x3f,1)
#define OPCODE3_MASK OPCODE3(0x3f,1)
#define OPCODE4_MASK OPCODE4(0x3f)
/* skip LDB - never unaligned (index) */
#define OPCODE_LDH_I OPCODE1(0x03,0,0x1)
#define OPCODE_LDW_I OPCODE1(0x03,0,0x2)
#define OPCODE_LDD_I OPCODE1(0x03,0,0x3)
#define OPCODE_LDDA_I OPCODE1(0x03,0,0x4)
#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5)
#define OPCODE_LDWA_I OPCODE1(0x03,0,0x6)
#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7)
/* skip LDB - never unaligned (short) */
#define OPCODE_LDH_S OPCODE1(0x03,1,0x1)
#define OPCODE_LDW_S OPCODE1(0x03,1,0x2)
#define OPCODE_LDD_S OPCODE1(0x03,1,0x3)
#define OPCODE_LDDA_S OPCODE1(0x03,1,0x4)
#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5)
#define OPCODE_LDWA_S OPCODE1(0x03,1,0x6)
#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7)
/* skip STB - never unaligned */
#define OPCODE_STH OPCODE1(0x03,1,0x9)
#define OPCODE_STW OPCODE1(0x03,1,0xa)
#define OPCODE_STD OPCODE1(0x03,1,0xb)
/* skip STBY - never unaligned */
/* skip STDBY - never unaligned */
#define OPCODE_STWA OPCODE1(0x03,1,0xe)
#define OPCODE_STDA OPCODE1(0x03,1,0xf)
#define OPCODE_FLDWX OPCODE1(0x09,0,0x0)
#define OPCODE_FLDWXR OPCODE1(0x09,0,0x1)
#define OPCODE_FSTWX OPCODE1(0x09,0,0x8)
#define OPCODE_FSTWXR OPCODE1(0x09,0,0x9)
#define OPCODE_FLDWS OPCODE1(0x09,1,0x0)
#define OPCODE_FLDWSR OPCODE1(0x09,1,0x1)
#define OPCODE_FSTWS OPCODE1(0x09,1,0x8)
#define OPCODE_FSTWSR OPCODE1(0x09,1,0x9)
#define OPCODE_FLDDX OPCODE1(0x0b,0,0x0)
#define OPCODE_FSTDX OPCODE1(0x0b,0,0x8)
#define OPCODE_FLDDS OPCODE1(0x0b,1,0x0)
#define OPCODE_FSTDS OPCODE1(0x0b,1,0x8)
#define OPCODE_LDD_L OPCODE2(0x14,0)
#define OPCODE_FLDD_L OPCODE2(0x14,1)
#define OPCODE_STD_L OPCODE2(0x1c,0)
#define OPCODE_FSTD_L OPCODE2(0x1c,1)
#define OPCODE_LDW_M OPCODE3(0x17,1)
#define OPCODE_FLDW_L OPCODE3(0x17,0)
#define OPCODE_FSTW_L OPCODE3(0x1f,0)
#define OPCODE_STW_M OPCODE3(0x1f,1)
#define OPCODE_LDH_L OPCODE4(0x11)
#define OPCODE_LDW_L OPCODE4(0x12)
#define OPCODE_LDWM OPCODE4(0x13)
#define OPCODE_STH_L OPCODE4(0x19)
#define OPCODE_STW_L OPCODE4(0x1A)
#define OPCODE_STWM OPCODE4(0x1B)
#define MAJOR_OP(i) (((i)>>26)&0x3f)
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
#define IM14(i) IM((i),14)
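/* PA-RISC immediates are low-sign-extended: the sign lives in bit 0.
* IM() rebuilds the signed value, e.g. IM(0x3, 5) == 1 | (-1 << 4)
* == -15, while IM(0x2, 5) == 1. */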
#define ERR_NOTHANDLED -1
#define ERR_PAGEFAULT -2
int unaligned_enabled = 1;
void die_if_kernel (char *str, struct pt_regs *regs, long err);
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
unsigned long saddr = regs->ior;
unsigned long val = 0;
int ret;
DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
regs->isr, regs->ior, toreg);
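/* load the two bytes separately through %sr1 (the faulting space)
* and merge them with depw; a fault in either ldbs lands in the
* .fixup stub, which returns ERR_PAGEFAULT (-2) via ret */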
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
"1: ldbs 0(%%sr1,%3), %%r20\n"
"2: ldbs 1(%%sr1,%3), %0\n"
" depw %%r20, 23, 24, %0\n"
" copy %%r0, %1\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r20" );
DPRINTF("val = 0x" RFMT "\n", val);
if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
unsigned long val = 0;
int ret;
DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
regs->isr, regs->ior, toreg);
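/* mask the address down to a word boundary, load the two aligned
* words spanning it, and funnel-shift (vshd) them by (addr & 3) * 8
* bits to reassemble the unaligned word */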
__asm__ __volatile__ (
" zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */
" mtsp %4, %%sr1\n"
" depw %%r0,31,2,%3\n"
"1: ldw 0(%%sr1,%3),%0\n"
"2: ldw 4(%%sr1,%3),%%r20\n"
" subi 32,%%r19,%%r19\n"
" mtctl %%r19,11\n"
" vshd %0,%%r20,%0\n"
" copy %%r0, %1\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20" );
DPRINTF("val = 0x" RFMT "\n", val);
if (flop)
((__u32*)(regs->fr))[toreg] = val;
else if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
__u64 val = 0;
int ret;
DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
regs->isr, regs->ior, toreg);
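/* same idea as emulate_ldw(): on PA2.0 combine two aligned
* doublewords with shrpd, on PA1.1 merge three aligned words with
* two vshd passes */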
#ifdef CONFIG_PA20
#ifndef __LP64__
if (!flop)
return -1;
#endif
__asm__ __volatile__ (
" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
" mtsp %4, %%sr1\n"
" depd %%r0,63,3,%3\n"
"1: ldd 0(%%sr1,%3),%0\n"
"2: ldd 8(%%sr1,%3),%%r20\n"
" subi 64,%%r19,%%r19\n"
" mtsar %%r19\n"
" shrpd %0,%%r20,%%sar,%0\n"
" copy %%r0, %1\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20" );
#else
{
unsigned long valh=0,vall=0;
__asm__ __volatile__ (
" zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */
" mtsp %6, %%sr1\n"
" dep %%r0,31,2,%5\n"
"1: ldw 0(%%sr1,%5),%0\n"
"2: ldw 4(%%sr1,%5),%1\n"
"3: ldw 8(%%sr1,%5),%%r20\n"
" subi 32,%%r19,%%r19\n"
" mtsar %%r19\n"
" vshd %0,%1,%0\n"
" vshd %1,%%r20,%1\n"
" copy %%r0, %2\n"
"4: \n"
" .section .fixup,\"ax\"\n"
"5: ldi -2, %2\n"
FIXUP_BRANCH(4b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,5b\n"
" .dword 2b,5b\n"
" .dword 3b,5b\n"
#else
" .word 1b,5b\n"
" .word 2b,5b\n"
" .word 3b,5b\n"
#endif
" .previous\n"
: "=r" (valh), "=r" (vall), "=r" (ret)
: "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
: "r19", "r20" );
val=((__u64)valh<<32)|(__u64)vall;
}
#endif
DPRINTF("val = 0x%llx\n", val);
if (flop)
regs->fr[toreg] = val;
else if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_sth(struct pt_regs *regs, int frreg)
{
unsigned long val = regs->gr[frreg];
int ret;
if (!frreg)
val = 0;
DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
val, regs->isr, regs->ior);
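/* split the halfword into two byte stores, which cannot themselves
* be misaligned; extrw,u peels the high byte into %r19 first */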
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" extrw,u %1, 23, 8, %%r19\n"
"1: stb %1, 1(%%sr1, %2)\n"
"2: stb %%r19, 0(%%sr1, %2)\n"
" copy %%r0, %0\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %0\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19" );
return ret;
}
static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
{
unsigned long val;
int ret;
if (flop)
val = ((__u32*)(regs->fr))[frreg];
else if (frreg)
val = regs->gr[frreg];
else
val = 0;
DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
val, regs->isr, regs->ior);
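/* read-modify-write: fetch the two aligned words around the target,
* clear the affected bytes with a mask built by depwi,z from the
* alignment, or in the shifted value, and store both words back */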
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" zdep %2, 28, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" mtsar %%r19\n"
" depwi,z -2, %%sar, 32, %%r19\n"
"1: ldw 0(%%sr1,%2),%%r20\n"
"2: ldw 4(%%sr1,%2),%%r21\n"
" vshd %%r0, %1, %%r22\n"
" vshd %1, %%r0, %%r1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %%r22, %%r20, %%r20\n"
" or %%r1, %%r21, %%r21\n"
" stw %%r20,0(%%sr1,%2)\n"
" stw %%r21,4(%%sr1,%2)\n"
" copy %%r0, %0\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %0\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
__u64 val;
int ret;
if (flop)
val = regs->fr[frreg];
else if (frreg)
val = regs->gr[frreg];
else
val = 0;
DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
val, regs->isr, regs->ior);
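/* doubleword variant of the emulate_stw() read-modify-write; the
* PA1.1 path must splice the value across up to three words */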
#ifdef CONFIG_PA20
#ifndef __LP64__
if (!flop)
return -1;
#endif
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" depd,z %2, 60, 3, %%r19\n"
" depd %%r0, 63, 3, %2\n"
" mtsar %%r19\n"
" depdi,z -2, %%sar, 64, %%r19\n"
"1: ldd 0(%%sr1,%2),%%r20\n"
"2: ldd 8(%%sr1,%2),%%r21\n"
" shrpd %%r0, %1, %%sar, %%r22\n"
" shrpd %1, %%r0, %%sar, %%r1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %%r22, %%r20, %%r20\n"
" or %%r1, %%r21, %%r21\n"
"3: std %%r20,0(%%sr1,%2)\n"
"4: std %%r21,8(%%sr1,%2)\n"
" copy %%r0, %0\n"
"5: \n"
" .section .fixup,\"ax\"\n"
"6: ldi -2, %0\n"
FIXUP_BRANCH(5b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,6b\n"
" .dword 2b,6b\n"
" .dword 3b,6b\n"
" .dword 4b,6b\n"
#else
" .word 1b,6b\n"
" .word 2b,6b\n"
" .word 3b,6b\n"
" .word 4b,6b\n"
#endif
" .previous\n"
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
#else
{
unsigned long valh=(val>>32),vall=(val&0xffffffffl);
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
" zdep %2, 29, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
"1: ldw 0(%%sr1,%3),%%r20\n"
"2: ldw 8(%%sr1,%3),%%r21\n"
" vshd %1, %2, %%r1\n"
" vshd %%r0, %1, %1\n"
" vshd %2, %%r0, %2\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
" or %2, %%r21, %2\n"
"3: stw %1,0(%%sr1,%1)\n"
"4: stw %%r1,4(%%sr1,%3)\n"
"5: stw %2,8(%%sr1,%3)\n"
" copy %%r0, %0\n"
"6: \n"
" .section .fixup,\"ax\"\n"
"7: ldi -2, %0\n"
FIXUP_BRANCH(6b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,7b\n"
" .dword 2b,7b\n"
" .dword 3b,7b\n"
" .dword 4b,7b\n"
" .dword 5b,7b\n"
#else
" .word 1b,7b\n"
" .word 2b,7b\n"
" .word 3b,7b\n"
" .word 4b,7b\n"
" .word 5b,7b\n"
#endif
" .previous\n"
: "=r" (ret)
: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1" );
}
#endif
return ret;
}
void handle_unaligned(struct pt_regs *regs)
{
static unsigned long unaligned_count = 0;
static unsigned long last_time = 0;
unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
int modify = 0;
int ret = ERR_NOTHANDLED;
struct siginfo si;
register int flop=0; /* true if this is a flop */
/* log a message with pacing */
if (user_mode(regs))
{
if (unaligned_count > 5 && jiffies - last_time > 5*HZ)
{
unaligned_count = 0;
last_time = jiffies;
}
if (++unaligned_count < 5)
{
char buf[256];
sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
current->comm, current->pid, regs->ior, regs->iaoq[0]);
printk(KERN_WARNING "%s", buf);
#ifdef DEBUG_UNALIGNED
show_regs(regs);
#endif
}
if (!unaligned_enabled)
goto force_sigbus;
}
/* handle modification - OK, it's ugly, see the instruction manual */
switch (MAJOR_OP(regs->iir))
{
case 0x03:
case 0x09:
case 0x0b:
if (regs->iir&0x20)
{
modify = 1;
if (regs->iir&0x1000) /* short loads */
if (regs->iir&0x200)
newbase += IM5_3(regs->iir);
else
newbase += IM5_2(regs->iir);
else if (regs->iir&0x2000) /* scaled indexed */
{
int shift=0;
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
shift= 1; break;
case OPCODE_LDW_I:
shift= 2; break;
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
shift= 3; break;
}
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
} else /* simple indexed */
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
}
break;
case 0x13:
case 0x1b:
modify = 1;
newbase += IM14(regs->iir);
break;
case 0x14:
case 0x1c:
if (regs->iir&8)
{
modify = 1;
newbase += IM14(regs->iir&~0xe);
}
break;
case 0x16:
case 0x1e:
modify = 1;
newbase += IM14(regs->iir&6);
break;
case 0x17:
case 0x1f:
if (regs->iir&4)
{
modify = 1;
newbase += IM14(regs->iir&~4);
}
break;
}
/* TODO: make this cleaner... */
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
case OPCODE_LDH_S:
ret = emulate_ldh(regs, R3(regs->iir));
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
ret = emulate_ldw(regs, R3(regs->iir),0);
break;
case OPCODE_STH:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW:
case OPCODE_STWA:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
#ifdef CONFIG_PA20
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
case OPCODE_LDD_S:
case OPCODE_LDDA_S:
ret = emulate_ldd(regs, R3(regs->iir),0);
break;
case OPCODE_STD:
case OPCODE_STDA:
ret = emulate_std(regs, R2(regs->iir),0);
break;
#endif
case OPCODE_FLDWX:
case OPCODE_FLDWS:
case OPCODE_FLDWXR:
case OPCODE_FLDWSR:
flop=1;
ret = emulate_ldw(regs,FR3(regs->iir),1);
break;
case OPCODE_FLDDX:
case OPCODE_FLDDS:
flop=1;
ret = emulate_ldd(regs,R3(regs->iir),1);
break;
case OPCODE_FSTWX:
case OPCODE_FSTWS:
case OPCODE_FSTWXR:
case OPCODE_FSTWSR:
flop=1;
ret = emulate_stw(regs,FR3(regs->iir),1);
break;
case OPCODE_FSTDX:
case OPCODE_FSTDS:
flop=1;
ret = emulate_std(regs,R3(regs->iir),1);
break;
case OPCODE_LDCD_I:
case OPCODE_LDCW_I:
case OPCODE_LDCD_S:
case OPCODE_LDCW_S:
ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
break;
}
#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
flop=1;
ret = emulate_ldd(regs,R2(regs->iir),1);
break;
case OPCODE_FSTD_L:
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
}
#endif
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
ret = emulate_ldw(regs, R2(regs->iir),1); /* FP load: flop=1 */
break;
case OPCODE_LDW_M:
ret = emulate_ldw(regs, R2(regs->iir),0); /* integer load: flop=0 */
break;
case OPCODE_FSTW_L:
flop=1;
ret = emulate_stw(regs, R2(regs->iir),1);
break;
case OPCODE_STW_M:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
switch (regs->iir & OPCODE4_MASK)
{
case OPCODE_LDH_L:
ret = emulate_ldh(regs, R2(regs->iir));
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
ret = emulate_ldw(regs, R2(regs->iir),0);
break;
case OPCODE_STH_L:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW_L:
case OPCODE_STWM:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
if (modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
if (ret == ERR_NOTHANDLED)
printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
DPRINTF("ret = %d\n", ret);
if (ret)
{
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
if (ret == ERR_PAGEFAULT)
{
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *)regs->ior;
force_sig_info(SIGSEGV, &si, current);
}
else
{
force_sigbus:
/* couldn't handle it ... */
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void __user *)regs->ior;
force_sig_info(SIGBUS, &si, current);
}
return;
}
/* else we handled it, let life go on. */
regs->gr[0]|=PSW_N;
}
/*
* NB: check_unaligned() is only used for PCXS processors right
* now, so we only check for PA1.1 encodings at this point.
*/
int
check_unaligned(struct pt_regs *regs)
{
unsigned long align_mask;
/* Get alignment mask */
align_mask = 0UL;
switch (regs->iir & OPCODE1_MASK) {
case OPCODE_LDH_I:
case OPCODE_LDH_S:
case OPCODE_STH:
align_mask = 1UL;
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
case OPCODE_STW:
case OPCODE_STWA:
align_mask = 3UL;
break;
default:
switch (regs->iir & OPCODE4_MASK) {
case OPCODE_LDH_L:
case OPCODE_STH_L:
align_mask = 1UL;
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
case OPCODE_STW_L:
case OPCODE_STWM:
align_mask = 3UL;
break;
}
break;
}
return (int)(regs->ior & align_mask);
}

393
arch/parisc/kernel/unwind.c Normal file
View File

@@ -0,0 +1,393 @@
/*
* Kernel unwinding support
*
* (c) 2002-2004 Randolph Chung <tausq@debian.org>
*
* Derived partially from the IA64 implementation. The PA-RISC
* Runtime Architecture Document is also a useful reference to
* understand what is happening here
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/unwind.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
static spinlock_t unwind_lock;
/*
* the kernel unwind block is not dynamically allocated so that
* we can call unwind_init as early in the bootup process as
* possible (before the slab allocator is initialized)
*/
static struct unwind_table kernel_unwind_table;
static LIST_HEAD(unwind_tables);
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
const struct unwind_table_entry *e = NULL;
unsigned long lo, hi, mid;
lo = 0;
hi = table->length - 1;
while (lo <= hi) {
mid = (hi - lo) / 2 + lo;
e = &table->table[mid];
if (addr < e->region_start)
hi = mid - 1;
else if (addr > e->region_end)
lo = mid + 1;
else
return e;
}
return NULL;
}
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
struct unwind_table *table;
const struct unwind_table_entry *e = NULL;
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
else
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
if (e)
break;
}
return e;
}
static void
unwind_table_init(struct unwind_table *table, const char *name,
unsigned long base_addr, unsigned long gp,
void *table_start, void *table_end)
{
struct unwind_table_entry *start = table_start;
struct unwind_table_entry *end =
(struct unwind_table_entry *)table_end - 1;
table->name = name;
table->base_addr = base_addr;
table->gp = gp;
table->start = base_addr + start->region_start;
table->end = base_addr + end->region_end;
table->table = (struct unwind_table_entry *)table_start;
table->length = end - start + 1;
INIT_LIST_HEAD(&table->list);
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
}
start->region_start += base_addr;
start->region_end += base_addr;
}
}
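/* Plain insertion sort on region_start. Unwind tables are expected
* to arrive nearly sorted, so this should normally be cheap. */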
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
struct unwind_table_entry el, *p, *q;
for (p = start + 1; p < finish; ++p) {
if (p[0].region_start < p[-1].region_start) {
el = *p;
q = p;
do {
q[0] = q[-1];
--q;
} while (q > start &&
el.region_start < q[-1].region_start);
*q = el;
}
}
}
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
void *start, void *end)
{
struct unwind_table *table;
unsigned long flags;
struct unwind_table_entry *s = (struct unwind_table_entry *)start;
struct unwind_table_entry *e = (struct unwind_table_entry *)end;
unwind_table_sort(s, e);
table = kmalloc(sizeof(struct unwind_table), GFP_USER);
if (table == NULL)
return NULL;
unwind_table_init(table, name, base_addr, gp, start, end);
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&table->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
return table;
}
void unwind_table_remove(struct unwind_table *table)
{
unsigned long flags;
spin_lock_irqsave(&unwind_lock, flags);
list_del(&table->list);
spin_unlock_irqrestore(&unwind_lock, flags);
kfree(table);
}
/* Called from setup_arch to import the kernel unwind info */
static int unwind_init(void)
{
long start, stop;
register unsigned long gp __asm__ ("r27");
start = (long)&__start___unwind[0];
stop = (long)&__stop___unwind[0];
spin_lock_init(&unwind_lock);
printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
start, stop,
(stop - start) / sizeof(struct unwind_table_entry));
unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
gp,
&__start___unwind[0], &__stop___unwind[0]);
#if 0
{
int i;
for (i = 0; i < 10; i++)
{
printk("region 0x%x-0x%x\n",
__start___unwind[i].region_start,
__start___unwind[i].region_end);
}
}
#endif
return 0;
}
static void unwind_frame_regs(struct unwind_frame_info *info)
{
const struct unwind_table_entry *e;
unsigned long npc;
unsigned int insn;
long frame_size = 0;
int looking_for_rp, rpoffset = 0;
e = find_unwind_entry(info->ip);
if (e == NULL) {
unsigned long sp;
extern char _stext[], _etext[];
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
#ifdef CONFIG_KALLSYMS
/* Handle some frequent special cases.... */
{
char symname[KSYM_NAME_LEN+1];
char *modname;
unsigned long symsize, offset;
kallsyms_lookup(info->ip, &symsize, &offset,
&modname, symname);
dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
if (strcmp(symname, "_switch_to_ret") == 0) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
dbg("_switch_to_ret @ %lx - setting "
"prev_sp=%lx prev_ip=%lx\n",
info->ip, info->prev_sp,
info->prev_ip);
return;
} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
strcmp(symname, "syscall_exit") == 0) {
info->prev_ip = info->prev_sp = 0;
return;
}
}
#endif
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
correctly. The rp is checked to see if it belongs to the
kernel text section, if not we assume we don't have a
correct stack frame and we continue to unwind the stack.
This is not quite correct, and will fail for loadable
modules. */
sp = info->sp & ~63;
do {
unsigned long tmp;
info->prev_sp = sp - 64;
info->prev_ip = 0;
if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
break;
info->prev_ip = tmp;
sp = info->prev_sp;
} while (info->prev_ip < (unsigned long)_stext ||
info->prev_ip > (unsigned long)_etext);
info->rp = 0;
dbg("analyzing func @ %lx with no unwind info, setting "
"prev_sp=%lx prev_ip=%lx\n", info->ip,
info->prev_sp, info->prev_ip);
} else {
dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
"Save_RP = %d, Millicode = %d size = %u\n",
e->region_start, e->region_end, e->Save_SP, e->Save_RP,
e->Millicode, e->Total_frame_size);
looking_for_rp = e->Save_RP;
for (npc = e->region_start;
(frame_size < (e->Total_frame_size << 3) ||
looking_for_rp) &&
npc < info->ip;
npc += 4) {
insn = *(unsigned int *)npc;
if ((insn & 0xffffc000) == 0x37de0000 ||
(insn & 0xffe00000) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
((insn & 0x3fff) >> 1);
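/* the displacement is a low-sign-extended im14: bit 0 carries
the sign, the remaining bits the magnitude, hence the
decode above */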
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if ((insn & 0xffe00008) == 0x73c00008) {
/* std,ma X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
(((insn >> 4) & 0x3ff) << 3);
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if (insn == 0x6bc23fd9) {
/* stw rp,-20(sp) */
rpoffset = 20;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=stw rp,"
"-20(sp) @ %lx\n", info->ip, npc);
} else if (insn == 0x0fc212c1) {
/* std rp,-16(sr0,sp) */
rpoffset = 16;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=std rp,"
"-16(sp) @ %lx\n", info->ip, npc);
}
}
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
info->rp = info->r31;
else if (rpoffset)
info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
info->prev_ip = info->rp;
info->rp = 0;
dbg("analyzing func @ %lx, setting prev_sp=%lx "
"prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
info->prev_ip, npc);
}
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
struct pt_regs *regs)
{
memset(info, 0, sizeof(struct unwind_frame_info));
info->t = t;
info->sp = regs->gr[30];
info->ip = regs->iaoq[0];
info->rp = regs->gr[2];
info->r31 = regs->gr[31];
dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
t ? (int)t->pid : -1, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
r2 = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
if (!r2)
return;
*r2 = *r;
r2->gr[30] = r->ksp;
r2->iaoq[0] = r->kpc;
unwind_frame_init(info, t, r2);
kfree(r2);
}
void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
unwind_frame_init(info, current, regs);
}
int unwind_once(struct unwind_frame_info *next_frame)
{
unwind_frame_regs(next_frame);
if (next_frame->prev_sp == 0 ||
next_frame->prev_ip == 0)
return -1;
next_frame->sp = next_frame->prev_sp;
next_frame->ip = next_frame->prev_ip;
next_frame->prev_sp = 0;
next_frame->prev_ip = 0;
dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
next_frame->t ? (int)next_frame->t->pid : -1,
next_frame->sp, next_frame->ip);
return 0;
}
int unwind_to_user(struct unwind_frame_info *info)
{
int ret;
do {
ret = unwind_once(info);
} while (!ret && !(info->ip & 3));
return ret;
}
module_init(unwind_init);

207
arch/parisc/kernel/vmlinux.lds.S Normal file
View File

@@ -0,0 +1,207 @@
/* Kernel link layout for various "sections"
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h>
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
ENTRY(_stext)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
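/* hppa is big-endian, so on 32-bit kernels the low-order word of the
64-bit jiffies_64 counter sits 4 bytes in; 'jiffies' aliases it */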
SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
_text = .; /* Text and read-only data */
.text ALIGN(16) : {
*(.text)
SCHED_TEXT
LOCK_TEXT
*(.text.do_softirq)
*(.text.sys_exit)
*(.text.do_sigaltstack)
*(.text.do_fork)
*(.text.*)
*(.fixup)
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
} = 0
_etext = .; /* End of text section */
RODATA
/* writeable */
. = ALIGN(4096); /* Make sure this is page aligned so
that we can properly leave these
as writable */
data_start = .;
. = ALIGN(16); /* Exception table */
__start___ex_table = .;
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
__start___unwind = .; /* unwind info */
.PARISC.unwind : { *(.PARISC.unwind) }
__stop___unwind = .;
.data : { /* Data */
*(.data)
*(.data.vm0.pmd)
*(.data.vm0.pgd)
*(.data.vm0.pte)
CONSTRUCTORS
}
. = ALIGN(4096);
/* nosave data is really only used for software suspend...it's here
* just in case we ever implement it */
__nosave_begin = .;
.data_nosave : { *(.data.nosave) }
. = ALIGN(4096);
__nosave_end = .;
. = ALIGN(L1_CACHE_BYTES);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
/* PA-RISC locks require 16-byte alignment */
. = ALIGN(16);
.data.lock_aligned : { *(.data.lock_aligned) }
_edata = .; /* End of data section */
. = ALIGN(16384); /* init_task */
.data.init_task : { *(.data.init_task) }
/* The interrupt stack is currently partially coded, but not yet
* implemented */
. = ALIGN(16384);
init_istack : { *(init_istack) }
#ifdef CONFIG_64BIT
. = ALIGN(16); /* Linkage tables */
.opd : { *(.opd) } PROVIDE (__gp = .);
.plt : { *(.plt) }
.dlt : { *(.dlt) }
#endif
. = ALIGN(16384);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : { *(.init.data) }
. = ALIGN(16);
__setup_start = .;
.init.setup : { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
}
__initcall_end = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
/* alternate instruction replacement. This is a mechanism x86 uses
* to detect the CPU type and replace generic instruction sequences
* with CPU specific ones. We don't currently do this in PA, but
* it seems like a good idea... */
. = ALIGN(4);
__alt_instructions = .;
.altinstructions : { *(.altinstructions) }
__alt_instructions_end = .;
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { *(.exit.text) }
.exit.data : { *(.exit.data) }
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;
/* freed after init ends here */
__bss_start = .; /* BSS */
.bss : { *(.bss) *(COMMON) }
__bss_stop = .;
_end = . ;
/* Sections to be discarded */
/DISCARD/ : {
*(.exitcall.exit)
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
for static binaries */
*(.interp)
*(.dynsym)
*(.dynstr)
*(.dynamic)
*(.hash)
#endif
}
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.note 0 : { *(.note) }
}