Merge git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6:
  parisc: convert to generic compat_sys_ptrace
  parisc: add rtc platform driver
  parisc: initialize unwinder much earlier
  parisc: add new syscalls
  parisc: hijack jump to start_kernel
  parisc: add pdc_coproc_cfg_unlocked and set_firmware_width_unlocked
  parisc: move include/asm-parisc to arch/parisc/include/asm
  parisc: move pdc_result to real2.S
  parisc: unify CCIO_COLLECT_STATS implementation
  parisc: add arch/parisc/kernel/.gitignore
  parisc: ropes.h - fix <asm-parisc/*> -> <asm/*>
  parisc: parisc-agp - fix <asm-parisc/*> -> <asm/*>

Resolve remove/rename conflict: include/asm-parisc/a.out.h is no longer
relevant.
Linus Torvalds
2008-10-20 14:38:14 -07:00
138 changed files with 457 additions and 318 deletions


@@ -0,0 +1,3 @@
include include/asm-generic/Kbuild.asm
unifdef-y += pdc.h


@@ -0,0 +1,24 @@
#ifndef _ASM_PARISC_AGP_H
#define _ASM_PARISC_AGP_H
/*
* PARISC specific AGP definitions.
* Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
*
*/
#define map_page_into_agp(page) /* nothing */
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif /* _ASM_PARISC_AGP_H */


@@ -0,0 +1,183 @@
/*
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PARISC_ASMREGS_H
#define _PARISC_ASMREGS_H
;! General Registers
rp: .reg %r2
arg3: .reg %r23
arg2: .reg %r24
arg1: .reg %r25
arg0: .reg %r26
dp: .reg %r27
ret0: .reg %r28
ret1: .reg %r29
sl: .reg %r29
sp: .reg %r30
#if 0
/* PA20_REVISIT */
arg7: .reg r19
arg6: .reg r20
arg5: .reg r21
arg4: .reg r22
gp: .reg r27
ap: .reg r29
#endif
r0: .reg %r0
r1: .reg %r1
r2: .reg %r2
r3: .reg %r3
r4: .reg %r4
r5: .reg %r5
r6: .reg %r6
r7: .reg %r7
r8: .reg %r8
r9: .reg %r9
r10: .reg %r10
r11: .reg %r11
r12: .reg %r12
r13: .reg %r13
r14: .reg %r14
r15: .reg %r15
r16: .reg %r16
r17: .reg %r17
r18: .reg %r18
r19: .reg %r19
r20: .reg %r20
r21: .reg %r21
r22: .reg %r22
r23: .reg %r23
r24: .reg %r24
r25: .reg %r25
r26: .reg %r26
r27: .reg %r27
r28: .reg %r28
r29: .reg %r29
r30: .reg %r30
r31: .reg %r31
;! Space Registers
sr0: .reg %sr0
sr1: .reg %sr1
sr2: .reg %sr2
sr3: .reg %sr3
sr4: .reg %sr4
sr5: .reg %sr5
sr6: .reg %sr6
sr7: .reg %sr7
;! Floating Point Registers
fr0: .reg %fr0
fr1: .reg %fr1
fr2: .reg %fr2
fr3: .reg %fr3
fr4: .reg %fr4
fr5: .reg %fr5
fr6: .reg %fr6
fr7: .reg %fr7
fr8: .reg %fr8
fr9: .reg %fr9
fr10: .reg %fr10
fr11: .reg %fr11
fr12: .reg %fr12
fr13: .reg %fr13
fr14: .reg %fr14
fr15: .reg %fr15
fr16: .reg %fr16
fr17: .reg %fr17
fr18: .reg %fr18
fr19: .reg %fr19
fr20: .reg %fr20
fr21: .reg %fr21
fr22: .reg %fr22
fr23: .reg %fr23
fr24: .reg %fr24
fr25: .reg %fr25
fr26: .reg %fr26
fr27: .reg %fr27
fr28: .reg %fr28
fr29: .reg %fr29
fr30: .reg %fr30
fr31: .reg %fr31
;! Control Registers
rctr: .reg %cr0
pidr1: .reg %cr8
pidr2: .reg %cr9
ccr: .reg %cr10
sar: .reg %cr11
pidr3: .reg %cr12
pidr4: .reg %cr13
iva: .reg %cr14
eiem: .reg %cr15
itmr: .reg %cr16
pcsq: .reg %cr17
pcoq: .reg %cr18
iir: .reg %cr19
isr: .reg %cr20
ior: .reg %cr21
ipsw: .reg %cr22
eirr: .reg %cr23
tr0: .reg %cr24
tr1: .reg %cr25
tr2: .reg %cr26
tr3: .reg %cr27
tr4: .reg %cr28
tr5: .reg %cr29
tr6: .reg %cr30
tr7: .reg %cr31
cr0: .reg %cr0
cr8: .reg %cr8
cr9: .reg %cr9
cr10: .reg %cr10
cr11: .reg %cr11
cr12: .reg %cr12
cr13: .reg %cr13
cr14: .reg %cr14
cr15: .reg %cr15
cr16: .reg %cr16
cr17: .reg %cr17
cr18: .reg %cr18
cr19: .reg %cr19
cr20: .reg %cr20
cr21: .reg %cr21
cr22: .reg %cr22
cr23: .reg %cr23
cr24: .reg %cr24
cr25: .reg %cr25
cr26: .reg %cr26
cr27: .reg %cr27
cr28: .reg %cr28
cr29: .reg %cr29
cr30: .reg %cr30
cr31: .reg %cr31
#endif


@@ -0,0 +1,519 @@
/*
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
* Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 1999 SuSE GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PARISC_ASSEMBLY_H
#define _PARISC_ASSEMBLY_H
#define CALLEE_FLOAT_FRAME_SIZE 80
#ifdef CONFIG_64BIT
#define LDREG ldd
#define STREG std
#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define SHRREG shrd
#define SHLREG shld
#define ANDCM andcm,*
#define COND(x) * ## x
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
#define STREG stw
#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define SHRREG shr
#define SHLREG shlw
#define ANDCM andcm
#define COND(x) x
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
#define ASM_ULONG_INSN .word
#endif
#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
#ifdef CONFIG_PA20
#define LDCW ldcw,co
#define BL b,l
# ifdef CONFIG_64BIT
# define LEVEL 2.0w
# else
# define LEVEL 2.0
# endif
#else
#define LDCW ldcw
#define BL bl
#define LEVEL 1.1
#endif
#ifdef __ASSEMBLY__
#ifdef CONFIG_64BIT
/* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
* work around that for now... */
.level 2.0w
#endif
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/asmregs.h>
sp = 30
gp = 27
ipsw = 22
/*
* We provide two versions of each macro to convert from physical
* to virtual and vice versa. The "_r1" versions take one argument
* register, but trash r1 to do the conversion. The other
* versions take two arguments: a source and a destination register.
* However, the source and destination registers cannot be
* the same register.
*/
.macro tophys grvirt, grphys
ldil L%(__PAGE_OFFSET), \grphys
sub \grvirt, \grphys, \grphys
.endm
.macro tovirt grphys, grvirt
ldil L%(__PAGE_OFFSET), \grvirt
add \grphys, \grvirt, \grvirt
.endm
.macro tophys_r1 gr
ldil L%(__PAGE_OFFSET), %r1
sub \gr, %r1, \gr
.endm
.macro tovirt_r1 gr
ldil L%(__PAGE_OFFSET), %r1
add \gr, %r1, \gr
.endm
.macro delay value
ldil L%\value, 1
ldo R%\value(1), 1
addib,UV,n -1,1,.
addib,NUV,n -1,1,.+8
nop
.endm
.macro debug value
.endm
/* Shift Left - note the r and t can NOT be the same! */
.macro shl r, sa, t
dep,z \r, 31-\sa, 32-\sa, \t
.endm
/* The PA 2.0 shift left */
.macro shlw r, sa, t
depw,z \r, 31-\sa, 32-\sa, \t
.endm
/* And the PA 2.0W shift left */
.macro shld r, sa, t
depd,z \r, 63-\sa, 64-\sa, \t
.endm
/* Shift Right - note the r and t can NOT be the same! */
.macro shr r, sa, t
extru \r, 31-\sa, 32-\sa, \t
.endm
/* pa20w version of shift right */
.macro shrd r, sa, t
extrd,u \r, 63-\sa, 64-\sa, \t
.endm
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
* containing '.'!!!! */
.macro load32 value, reg
ldil L%\value, \reg
ldo R%\value(\reg), \reg
.endm
.macro loadgp
#ifdef CONFIG_64BIT
ldil L%__gp, %r27
ldo R%__gp(%r27), %r27
#else
ldil L%$global$, %r27
ldo R%$global$(%r27), %r27
#endif
.endm
#define SAVE_SP(r, where) mfsp r, %r1 ! STREG %r1, where
#define REST_SP(r, where) LDREG where, %r1 ! mtsp %r1, r
#define SAVE_CR(r, where) mfctl r, %r1 ! STREG %r1, where
#define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r
.macro save_general regs
STREG %r1, PT_GR1 (\regs)
STREG %r2, PT_GR2 (\regs)
STREG %r3, PT_GR3 (\regs)
STREG %r4, PT_GR4 (\regs)
STREG %r5, PT_GR5 (\regs)
STREG %r6, PT_GR6 (\regs)
STREG %r7, PT_GR7 (\regs)
STREG %r8, PT_GR8 (\regs)
STREG %r9, PT_GR9 (\regs)
STREG %r10, PT_GR10(\regs)
STREG %r11, PT_GR11(\regs)
STREG %r12, PT_GR12(\regs)
STREG %r13, PT_GR13(\regs)
STREG %r14, PT_GR14(\regs)
STREG %r15, PT_GR15(\regs)
STREG %r16, PT_GR16(\regs)
STREG %r17, PT_GR17(\regs)
STREG %r18, PT_GR18(\regs)
STREG %r19, PT_GR19(\regs)
STREG %r20, PT_GR20(\regs)
STREG %r21, PT_GR21(\regs)
STREG %r22, PT_GR22(\regs)
STREG %r23, PT_GR23(\regs)
STREG %r24, PT_GR24(\regs)
STREG %r25, PT_GR25(\regs)
/* r26 is saved in get_stack and used to preserve a value across virt_map */
STREG %r27, PT_GR27(\regs)
STREG %r28, PT_GR28(\regs)
/* r29 is saved in get_stack and used to point to saved registers */
/* r30 stack pointer saved in get_stack */
STREG %r31, PT_GR31(\regs)
.endm
.macro rest_general regs
/* r1 used as a temp in rest_stack and is restored there */
LDREG PT_GR2 (\regs), %r2
LDREG PT_GR3 (\regs), %r3
LDREG PT_GR4 (\regs), %r4
LDREG PT_GR5 (\regs), %r5
LDREG PT_GR6 (\regs), %r6
LDREG PT_GR7 (\regs), %r7
LDREG PT_GR8 (\regs), %r8
LDREG PT_GR9 (\regs), %r9
LDREG PT_GR10(\regs), %r10
LDREG PT_GR11(\regs), %r11
LDREG PT_GR12(\regs), %r12
LDREG PT_GR13(\regs), %r13
LDREG PT_GR14(\regs), %r14
LDREG PT_GR15(\regs), %r15
LDREG PT_GR16(\regs), %r16
LDREG PT_GR17(\regs), %r17
LDREG PT_GR18(\regs), %r18
LDREG PT_GR19(\regs), %r19
LDREG PT_GR20(\regs), %r20
LDREG PT_GR21(\regs), %r21
LDREG PT_GR22(\regs), %r22
LDREG PT_GR23(\regs), %r23
LDREG PT_GR24(\regs), %r24
LDREG PT_GR25(\regs), %r25
LDREG PT_GR26(\regs), %r26
LDREG PT_GR27(\regs), %r27
LDREG PT_GR28(\regs), %r28
/* r29 points to register save area, and is restored in rest_stack */
/* r30 stack pointer restored in rest_stack */
LDREG PT_GR31(\regs), %r31
.endm
.macro save_fp regs
fstd,ma %fr0, 8(\regs)
fstd,ma %fr1, 8(\regs)
fstd,ma %fr2, 8(\regs)
fstd,ma %fr3, 8(\regs)
fstd,ma %fr4, 8(\regs)
fstd,ma %fr5, 8(\regs)
fstd,ma %fr6, 8(\regs)
fstd,ma %fr7, 8(\regs)
fstd,ma %fr8, 8(\regs)
fstd,ma %fr9, 8(\regs)
fstd,ma %fr10, 8(\regs)
fstd,ma %fr11, 8(\regs)
fstd,ma %fr12, 8(\regs)
fstd,ma %fr13, 8(\regs)
fstd,ma %fr14, 8(\regs)
fstd,ma %fr15, 8(\regs)
fstd,ma %fr16, 8(\regs)
fstd,ma %fr17, 8(\regs)
fstd,ma %fr18, 8(\regs)
fstd,ma %fr19, 8(\regs)
fstd,ma %fr20, 8(\regs)
fstd,ma %fr21, 8(\regs)
fstd,ma %fr22, 8(\regs)
fstd,ma %fr23, 8(\regs)
fstd,ma %fr24, 8(\regs)
fstd,ma %fr25, 8(\regs)
fstd,ma %fr26, 8(\regs)
fstd,ma %fr27, 8(\regs)
fstd,ma %fr28, 8(\regs)
fstd,ma %fr29, 8(\regs)
fstd,ma %fr30, 8(\regs)
fstd %fr31, 0(\regs)
.endm
.macro rest_fp regs
fldd 0(\regs), %fr31
fldd,mb -8(\regs), %fr30
fldd,mb -8(\regs), %fr29
fldd,mb -8(\regs), %fr28
fldd,mb -8(\regs), %fr27
fldd,mb -8(\regs), %fr26
fldd,mb -8(\regs), %fr25
fldd,mb -8(\regs), %fr24
fldd,mb -8(\regs), %fr23
fldd,mb -8(\regs), %fr22
fldd,mb -8(\regs), %fr21
fldd,mb -8(\regs), %fr20
fldd,mb -8(\regs), %fr19
fldd,mb -8(\regs), %fr18
fldd,mb -8(\regs), %fr17
fldd,mb -8(\regs), %fr16
fldd,mb -8(\regs), %fr15
fldd,mb -8(\regs), %fr14
fldd,mb -8(\regs), %fr13
fldd,mb -8(\regs), %fr12
fldd,mb -8(\regs), %fr11
fldd,mb -8(\regs), %fr10
fldd,mb -8(\regs), %fr9
fldd,mb -8(\regs), %fr8
fldd,mb -8(\regs), %fr7
fldd,mb -8(\regs), %fr6
fldd,mb -8(\regs), %fr5
fldd,mb -8(\regs), %fr4
fldd,mb -8(\regs), %fr3
fldd,mb -8(\regs), %fr2
fldd,mb -8(\regs), %fr1
fldd,mb -8(\regs), %fr0
.endm
.macro callee_save_float
fstd,ma %fr12, 8(%r30)
fstd,ma %fr13, 8(%r30)
fstd,ma %fr14, 8(%r30)
fstd,ma %fr15, 8(%r30)
fstd,ma %fr16, 8(%r30)
fstd,ma %fr17, 8(%r30)
fstd,ma %fr18, 8(%r30)
fstd,ma %fr19, 8(%r30)
fstd,ma %fr20, 8(%r30)
fstd,ma %fr21, 8(%r30)
.endm
.macro callee_rest_float
fldd,mb -8(%r30), %fr21
fldd,mb -8(%r30), %fr20
fldd,mb -8(%r30), %fr19
fldd,mb -8(%r30), %fr18
fldd,mb -8(%r30), %fr17
fldd,mb -8(%r30), %fr16
fldd,mb -8(%r30), %fr15
fldd,mb -8(%r30), %fr14
fldd,mb -8(%r30), %fr13
fldd,mb -8(%r30), %fr12
.endm
#ifdef CONFIG_64BIT
.macro callee_save
std,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
mfctl %cr27, %r3
std %r4, -136(%r30)
std %r5, -128(%r30)
std %r6, -120(%r30)
std %r7, -112(%r30)
std %r8, -104(%r30)
std %r9, -96(%r30)
std %r10, -88(%r30)
std %r11, -80(%r30)
std %r12, -72(%r30)
std %r13, -64(%r30)
std %r14, -56(%r30)
std %r15, -48(%r30)
std %r16, -40(%r30)
std %r17, -32(%r30)
std %r18, -24(%r30)
std %r3, -16(%r30)
.endm
.macro callee_rest
ldd -16(%r30), %r3
ldd -24(%r30), %r18
ldd -32(%r30), %r17
ldd -40(%r30), %r16
ldd -48(%r30), %r15
ldd -56(%r30), %r14
ldd -64(%r30), %r13
ldd -72(%r30), %r12
ldd -80(%r30), %r11
ldd -88(%r30), %r10
ldd -96(%r30), %r9
ldd -104(%r30), %r8
ldd -112(%r30), %r7
ldd -120(%r30), %r6
ldd -128(%r30), %r5
ldd -136(%r30), %r4
mtctl %r3, %cr27
ldd,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
.endm
#else /* ! CONFIG_64BIT */
.macro callee_save
stw,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
mfctl %cr27, %r3
stw %r4, -124(%r30)
stw %r5, -120(%r30)
stw %r6, -116(%r30)
stw %r7, -112(%r30)
stw %r8, -108(%r30)
stw %r9, -104(%r30)
stw %r10, -100(%r30)
stw %r11, -96(%r30)
stw %r12, -92(%r30)
stw %r13, -88(%r30)
stw %r14, -84(%r30)
stw %r15, -80(%r30)
stw %r16, -76(%r30)
stw %r17, -72(%r30)
stw %r18, -68(%r30)
stw %r3, -64(%r30)
.endm
.macro callee_rest
ldw -64(%r30), %r3
ldw -68(%r30), %r18
ldw -72(%r30), %r17
ldw -76(%r30), %r16
ldw -80(%r30), %r15
ldw -84(%r30), %r14
ldw -88(%r30), %r13
ldw -92(%r30), %r12
ldw -96(%r30), %r11
ldw -100(%r30), %r10
ldw -104(%r30), %r9
ldw -108(%r30), %r8
ldw -112(%r30), %r7
ldw -116(%r30), %r6
ldw -120(%r30), %r5
ldw -124(%r30), %r4
mtctl %r3, %cr27
ldw,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
.endm
#endif /* ! CONFIG_64BIT */
.macro save_specials regs
SAVE_SP (%sr0, PT_SR0 (\regs))
SAVE_SP (%sr1, PT_SR1 (\regs))
SAVE_SP (%sr2, PT_SR2 (\regs))
SAVE_SP (%sr3, PT_SR3 (\regs))
SAVE_SP (%sr4, PT_SR4 (\regs))
SAVE_SP (%sr5, PT_SR5 (\regs))
SAVE_SP (%sr6, PT_SR6 (\regs))
SAVE_SP (%sr7, PT_SR7 (\regs))
SAVE_CR (%cr17, PT_IASQ0(\regs))
mtctl %r0, %cr17
SAVE_CR (%cr17, PT_IASQ1(\regs))
SAVE_CR (%cr18, PT_IAOQ0(\regs))
mtctl %r0, %cr18
SAVE_CR (%cr18, PT_IAOQ1(\regs))
#ifdef CONFIG_64BIT
/* cr11 (sar) is a funny one. 5 bits on PA1.1 and 6 bits on PA2.0
* For PA2.0 mtsar or mtctl always write 6 bits, but mfctl only
* reads 5 bits. Use mfctl,w to read all six bits. Otherwise
* we lose the 6th bit on a save/restore over interrupt.
*/
mfctl,w %cr11, %r1
STREG %r1, PT_SAR (\regs)
#else
SAVE_CR (%cr11, PT_SAR (\regs))
#endif
SAVE_CR (%cr19, PT_IIR (\regs))
/*
* Code immediately following this macro (in intr_save) relies
* on r8 containing ipsw.
*/
mfctl %cr22, %r8
STREG %r8, PT_PSW(\regs)
.endm
.macro rest_specials regs
REST_SP (%sr0, PT_SR0 (\regs))
REST_SP (%sr1, PT_SR1 (\regs))
REST_SP (%sr2, PT_SR2 (\regs))
REST_SP (%sr3, PT_SR3 (\regs))
REST_SP (%sr4, PT_SR4 (\regs))
REST_SP (%sr5, PT_SR5 (\regs))
REST_SP (%sr6, PT_SR6 (\regs))
REST_SP (%sr7, PT_SR7 (\regs))
REST_CR (%cr17, PT_IASQ0(\regs))
REST_CR (%cr17, PT_IASQ1(\regs))
REST_CR (%cr18, PT_IAOQ0(\regs))
REST_CR (%cr18, PT_IAOQ1(\regs))
REST_CR (%cr11, PT_SAR (\regs))
REST_CR (%cr22, PT_PSW (\regs))
.endm
/* First step to create a "relied upon translation"
* See PA 2.0 Arch. page F-4 and F-5.
*
* The ssm was originally necessary due to a "PCxT bug".
* But someone decided it needed to be added to the architecture
* and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
* It's been carried forward into PA 2.0 Arch as well. :^(
*
* "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
* rsm/ssm prevents the ifetch unit from speculatively fetching
* instructions past this line in the code stream.
* A PA 2.0 processor will single-step all insns in the same QUAD (4 insns).
*/
.macro pcxt_ssm_bug
rsm PSW_SM_I,%r0
nop /* 1 */
nop /* 2 */
nop /* 3 */
nop /* 4 */
nop /* 5 */
nop /* 6 */
nop /* 7 */
.endm
#endif /* __ASSEMBLY__ */
#endif


@@ -0,0 +1,348 @@
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
*/
#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_
#include <linux/types.h>
#include <asm/system.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*
* And probably incredibly slow on parisc. OTOH, we don't
* have to write any serious assembly. prumpf
*/
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h> /* we use L1_CACHE_BYTES */
/* Use an array of spinlocks for our atomic_ts.
* Hash function to index into a different SPINLOCK.
* Since "a" is usually an address, use one spinlock per cacheline.
*/
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
raw_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
raw_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
switch(size) {
#ifdef CONFIG_64BIT
case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
case 4: return __xchg32((int) x, (int *) ptr);
case 1: return __xchg8((char) x, (char *) ptr);
}
__xchg_called_with_bad_pointer();
return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch(size) {
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
return __cmpxchg_local_generic(ptr, old, new_, size);
}
}
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
/* Note that we need not lock read accesses - aligned word writes/reads
* are atomic, so a reader never sees inconsistent values.
*
* Cache-line alignment would conflict with, for example, linux/module.h
*/
typedef struct { volatile int counter; } atomic_t;
/* It's possible to reduce all atomic operations to
* __atomic_add_return, atomic_set and atomic_read (the latter
* is there only for consistency).
*/
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
int ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
ret = (v->counter += i);
_atomic_spin_unlock_irqrestore(v, flags);
return ret;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
v->counter = i;
_atomic_spin_unlock_irqrestore(v, flags);
}
static __inline__ int atomic_read(const atomic_t *v)
{
return v->counter;
}
/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
#define atomic_add_return(i,v) (__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v) (__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#ifdef CONFIG_64BIT
typedef struct { volatile s64 counter; } atomic64_t;
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
s64 ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
ret = (v->counter += i);
_atomic_spin_unlock_irqrestore(v, flags);
return ret;
}
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
v->counter = i;
_atomic_spin_unlock_irqrestore(v, flags);
}
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
return v->counter;
}
#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)i),(v))))
#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)i),(v))))
#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)i),(v)))
#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)i),(v)))
#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif /* CONFIG_64BIT */
#include <asm-generic/atomic.h>
#endif /* _ASM_PARISC_ATOMIC_H_ */


@@ -0,0 +1,4 @@
#ifndef __ASMPARISC_AUXVEC_H
#define __ASMPARISC_AUXVEC_H
#endif


@@ -0,0 +1,239 @@
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>
/*
* HP-PARISC specific bit operations
* For a detailed description of the functions, please refer
* to include/asm-i386/bitops.h or the kerneldoc.
*/
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
* __*_bit() are "relaxed" and don't use spinlock or volatile.
*/
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
*addr |= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
*addr &= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
*addr ^= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long old;
unsigned long flags;
int set;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
old = *addr;
set = (old & mask) ? 1 : 0;
if (!set)
*addr = old | mask;
_atomic_spin_unlock_irqrestore(addr, flags);
return set;
}
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long old;
unsigned long flags;
int set;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
old = *addr;
set = (old & mask) ? 1 : 0;
if (set)
*addr = old & ~mask;
_atomic_spin_unlock_irqrestore(addr, flags);
return set;
}
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long oldbit;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
oldbit = *addr;
*addr = oldbit ^ mask;
_atomic_spin_unlock_irqrestore(addr, flags);
return (oldbit & mask) ? 1 : 0;
}
#include <asm-generic/bitops/non-atomic.h>
#ifdef __KERNEL__
/**
* __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
* @word: The word to search
*
* __ffs() return is undefined if no bit is set.
*
* 32-bit fast __ffs by LaMont Jones "lamont At hp com".
* 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
* (with help from willy/jejb to get the semantics right)
*
* This algorithm avoids branches by making use of nullification.
* One side effect of "extr" instructions is it sets PSW[N] bit.
* How PSW[N] (nullify next insn) gets set is determined by the
* "condition" field (eg "<>" or "TR" below) in the extr* insn.
* Only the 1st and one of either the 2nd or 3rd insn will get executed.
* Each set of 3 insns will get executed in 2 cycles on PA8x00 vs 16 or so
* cycles for each mispredicted branch.
*/
static __inline__ unsigned long __ffs(unsigned long x)
{
unsigned long ret;
__asm__(
#ifdef CONFIG_64BIT
" ldi 63,%1\n"
" extrd,u,*<> %0,63,32,%%r0\n"
" extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */
" addi -32,%1,%1\n"
#else
" ldi 31,%1\n"
#endif
" extru,<> %0,31,16,%%r0\n"
" extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */
" addi -16,%1,%1\n"
" extru,<> %0,31,8,%%r0\n"
" extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */
" addi -8,%1,%1\n"
" extru,<> %0,31,4,%%r0\n"
" extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */
" addi -4,%1,%1\n"
" extru,<> %0,31,2,%%r0\n"
" extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */
" addi -2,%1,%1\n"
" extru,= %0,31,1,%%r0\n" /* check last bit */
" addi -1,%1,%1\n"
: "+r" (x), "=r" (ret) );
return ret;
}
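/*
 * For reference, a minimal portable C sketch of the binary search that the
 * asm above performs with nullification instead of branches.  The helper
 * name __ffs_sketch is purely illustrative and is not part of this header;
 * as with __ffs(), the result is undefined for x == 0.
 */
static __inline__ unsigned long __ffs_sketch(unsigned long x)
{
	unsigned long ret = BITS_PER_LONG - 1;

#ifdef CONFIG_64BIT
	if (x & 0xffffffffUL) ret -= 32; else x >>= 32;
#endif
	if (x & 0xffff) ret -= 16; else x >>= 16;
	if (x & 0xff)   ret -= 8;  else x >>= 8;
	if (x & 0xf)    ret -= 4;  else x >>= 4;
	if (x & 0x3)    ret -= 2;  else x >>= 2;
	if (x & 0x1)    ret -= 1;
	return ret;
}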
#include <asm-generic/bitops/ffz.h>
/*
* ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
* This is defined the same way as the libc and compiler builtin
* ffs routines, therefore differs in spirit from the above ffz (man ffs).
*/
static __inline__ int ffs(int x)
{
return x ? (__ffs((unsigned long)x) + 1) : 0;
}
/*
* fls: find last (most significant) bit set.
* fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __inline__ int fls(int x)
{
int ret;
if (!x)
return 0;
__asm__(
" ldi 1,%1\n"
" extru,<> %0,15,16,%%r0\n"
" zdep,TR %0,15,16,%0\n" /* xxxx0000 */
" addi 16,%1,%1\n"
" extru,<> %0,7,8,%%r0\n"
" zdep,TR %0,23,24,%0\n" /* xx000000 */
" addi 8,%1,%1\n"
" extru,<> %0,3,4,%%r0\n"
" zdep,TR %0,27,28,%0\n" /* x0000000 */
" addi 4,%1,%1\n"
" extru,<> %0,1,2,%%r0\n"
" zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */
" addi 2,%1,%1\n"
" extru,= %0,0,1,%%r0\n"
" addi 1,%1,%1\n" /* if y & 8, add 1 */
: "+r" (x), "=r" (ret) );
return ret;
}
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
/* shifting by '3' multiplies by bits per byte */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
#define ext2_set_bit_atomic(l,nr,addr) \
test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#endif /* __KERNEL__ */
#include <asm-generic/bitops/minix-le.h>
#endif /* _PARISC_BITOPS_H */


@@ -0,0 +1,92 @@
#ifndef _PARISC_BUG_H
#define _PARISC_BUG_H
/*
* Tell the user there is some problem.
* The offending file and line are encoded in the __bug_table section.
*/
#ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
/* the break instruction is used as BUG() marker. */
#define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
#define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
#if defined(CONFIG_64BIT)
#define ASM_WORD_INSN ".dword\t"
#else
#define ASM_WORD_INSN ".word\t"
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define BUG() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
for(;;) ; \
} while(0)
#else
#define BUG() \
do { \
asm volatile(PARISC_BUG_BREAK_ASM : : ); \
for(;;) ; \
} while(0)
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define __WARN() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#else
#define __WARN() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t" ASM_WORD_INSN "1b\n" \
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
"\t.popsection" \
: : "i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#endif
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
if (__ret_warn_on) \
__WARN(); \
} else { \
if (unlikely(__ret_warn_on)) \
__WARN(); \
} \
unlikely(__ret_warn_on); \
})
#endif
#include <asm-generic/bug.h>
#endif


@@ -0,0 +1,19 @@
/*
* include/asm-parisc/bugs.h
*
* Copyright (C) 1999 Mike Shaver
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
#include <asm/processor.h>
static inline void check_bugs(void)
{
// identify_cpu(&boot_cpu_data);
}


@@ -0,0 +1,82 @@
#ifndef _PARISC_BYTEORDER_H
#define _PARISC_BYTEORDER_H
#include <asm/types.h>
#include <linux/compiler.h>
#ifdef __GNUC__
static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
{
__asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
"shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
: "=r" (x)
: "0" (x));
return x;
}
static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
{
__asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
"dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
"shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
: "=r" (x)
: "0" (x));
return x;
}
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
unsigned int temp;
__asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
"dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
"shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
: "=r" (x), "=&r" (temp)
: "0" (x));
return x;
}
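/* Worked example (illustrative): ___arch__swab32(0x12345678) yields
 * 0x78563412, i.e. the four bytes are reversed. */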
#if BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
**
** Pretty cool algorithm: (* == zero'd bits)
** PERMH 01234567 -> 67452301 into %0
** HSHL 67452301 -> 7*5*3*1* into %1
** HSHR 67452301 -> *6*4*2*0 into %0
** OR %0 | %1 -> 76543210 into %0 (all done!)
*/
static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
__u64 temp;
__asm__("permh,3210 %0, %0\n\t"
"hshl %0, 8, %1\n\t"
"hshr,u %0, 8, %0\n\t"
"or %1, %0, %0"
: "=r" (x), "=&r" (temp)
: "0" (x));
return x;
}
#define __arch__swab64(x) ___arch__swab64(x)
#define __BYTEORDER_HAS_U64__
#elif !defined(__STRICT_ANSI__)
static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
__u32 t1 = ___arch__swab32((__u32) x);
__u32 t2 = ___arch__swab32((__u32) (x >> 32));
return (((__u64) t1 << 32) | t2);
}
#define __arch__swab64(x) ___arch__swab64(x)
#define __BYTEORDER_HAS_U64__
#endif
#define __arch__swab16(x) ___arch__swab16(x)
#define __arch__swab24(x) ___arch__swab24(x)
#define __arch__swab32(x) ___arch__swab32(x)
#endif /* __GNUC__ */
#include <linux/byteorder/big_endian.h>
#endif /* _PARISC_BYTEORDER_H */


@@ -0,0 +1,60 @@
/*
* include/asm-parisc/cache.h
*/
#ifndef __ARCH_PARISC_CACHE_H
#define __ARCH_PARISC_CACHE_H
/*
* PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
* 32-byte cachelines. The default configuration is not for SMP anyway,
* so if you're building for SMP, you should select the appropriate
* processor type. There is a potential livelock danger when running
* a machine with this value set too small, but it's more probable you'll
* just ruin performance.
*/
#ifdef CONFIG_PA20
#define L1_CACHE_BYTES 64
#define L1_CACHE_SHIFT 6
#else
#define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5
#endif
#ifndef __ASSEMBLY__
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
void disable_sr_hashing(void); /* turns off space register hashing */
void free_sid(unsigned long);
unsigned long alloc_sid(void);
struct seq_file;
extern void show_cache_info(struct seq_file *m);
extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
void parisc_setup_cache_timing(void);
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr));
#endif /* ! __ASSEMBLY__ */
/* Classes of processor wrt: disabling space register hashing */
#define SRHASH_PCXST 0 /* pcxs, pcxt, pcxt_ */
#define SRHASH_PCXL 1 /* pcxl */
#define SRHASH_PA20 2 /* pcxu, pcxu_, pcxw, pcxw_ */
#endif


@@ -0,0 +1,121 @@
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H
#include <linux/mm.h>
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
/* Internal implementation */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);
/* Cache flush operations */
void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
spin_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page(page); \
flush_kernel_icache_page(page_address(page)); \
} while (0)
#define flush_icache_range(s,e) do { \
flush_kernel_dcache_range_asm(s,e); \
flush_kernel_icache_range_asm(s,e); \
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
} while (0)
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (PageAnon(page))
flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
flush_kernel_dcache_page_addr(page_address(page));
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 needs this */
#define ARCH_HAS_KMAP
void kunmap_parisc(void *addr);
static inline void *kmap(struct page *page)
{
might_sleep();
return page_address(page);
}
#define kunmap(page) kunmap_parisc(page_address(page))
#define kmap_atomic(page, idx) page_address(page)
#define kunmap_atomic(addr, idx) kunmap_parisc(addr)
#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
#endif /* _PARISC_CACHEFLUSH_H */


@@ -0,0 +1,210 @@
#ifndef _PARISC_CHECKSUM_H
#define _PARISC_CHECKSUM_H
#include <linux/in6.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *, int, __wsum);
/*
* The same as csum_partial, but copies from src while it checksums.
*
* Here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
/*
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
*/
extern __wsum csum_partial_copy_from_user(const void __user *src,
void *dst, int len, __wsum sum, int *errp);
/*
* Optimized for IP headers, which always checksum on 4 octet boundaries.
*
* Written by Randolph Chung <tausq@debian.org>, and then mucked with by
* LaMont Jones <lamont@debian.org>
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
__asm__ __volatile__ (
" ldws,ma 4(%1), %0\n"
" addib,<= -4, %2, 2f\n"
"\n"
" ldws 4(%1), %%r20\n"
" ldws 8(%1), %%r21\n"
" add %0, %%r20, %0\n"
" ldws,ma 12(%1), %%r19\n"
" addc %0, %%r21, %0\n"
" addc %0, %%r19, %0\n"
"1: ldws,ma 4(%1), %%r19\n"
" addib,< 0, %2, 1b\n"
" addc %0, %%r19, %0\n"
"\n"
" extru %0, 31, 16, %%r20\n"
" extru %0, 15, 16, %%r21\n"
" addc %%r20, %%r21, %0\n"
" extru %0, 15, 16, %%r21\n"
" add %0, %%r21, %0\n"
" subi -1, %0, %0\n"
"2:\n"
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "r19", "r20", "r21", "memory");
return (__force __sum16)sum;
}
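/*
 * For reference, a portable C sketch of what the asm above computes
 * (illustrative only, not this header's implementation, and the name
 * ip_fast_csum_sketch is hypothetical): sum the ihl 32-bit words of the
 * IP header, fold the carries down to 16 bits and return the one's
 * complement.
 */
static inline __sum16 ip_fast_csum_sketch(const void *iph, unsigned int ihl)
{
	const unsigned int *word = iph;
	unsigned long long sum = 0;
	unsigned int i;

	for (i = 0; i < ihl; i++)
		sum += word[i];
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* fold 64 -> 32 bits */
	sum = (sum & 0xffff) + (sum >> 16);		/* fold 32 -> 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);		/* add any final carry */
	return (__force __sum16)~sum;
}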
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
/* Add the two swapped 16-bit halves of sum; a possible carry
from adding the two 16-bit halves will carry from the lower
half into the upper half, giving us the correct sum in the
upper half. */
sum += (sum << 16) + (sum >> 16);
return (__force __sum16)(~sum >> 16);
}
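/*
 * Worked example (illustrative): for csum == 0x0001ffff the two 16-bit
 * halves sum to 0x10000; the end-around carry folds this to 0x0001, and
 * the complemented value returned is 0xfffe.
 */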
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__(
" add %1, %0, %0\n"
" addc %2, %0, %0\n"
" addc %3, %0, %0\n"
" addc %%r0, %0, %0\n"
: "=r" (sum)
: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buf, int len)
{
return csum_fold (csum_partial(buf, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
#if BITS_PER_LONG > 32
/*
** We can execute two loads and two adds per cycle on PA 8000.
** But add insn's get serialized waiting for the carry bit.
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */
" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n"/* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */
" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */
" add,dc %%r20, %0, %0\n"
" add,dc %%r21, %0, %0\n"
" add,dc %%r22, %0, %0\n"
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */
" depdi 0, 31, 32, %0\n" /* clear upper half */
" add %%r19, %0, %0\n" /* fold into 32-bits */
" addc 0, %0, %0\n" /* add carry */
#else
/*
** For PA 1.x, the insn order doesn't matter as much.
** Insn stream is serialized on the carry bit here too: each add must
** wait for the result of the previous operation (eg r0 + x).
*/
" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */
" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n" /* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */
" addc %%r21, %0, %0\n"
" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */
" addc %%r22, %0, %0\n"
" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */
" addc %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 4th saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 4th daddr */
" addc %%r21, %0, %0\n"
" addc %%r22, %0, %0\n"
" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
: "r19", "r20", "r21", "r22");
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
{
/* code stolen from include/asm-mips64 */
sum = csum_partial(src, len, sum);
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return sum;
}
#endif


@@ -0,0 +1,165 @@
#ifndef _ASM_PARISC_COMPAT_H
#define _ASM_PARISC_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev; /* dev_t is 32 bits on parisc */
compat_ino_t st_ino; /* 32 bits */
compat_mode_t st_mode; /* 16 bits */
compat_nlink_t st_nlink; /* 16 bits */
u16 st_reserved1; /* old st_uid */
u16 st_reserved2; /* old st_gid */
compat_dev_t st_rdev;
compat_off_t st_size;
compat_time_t st_atime;
u32 st_atime_nsec;
compat_time_t st_mtime;
u32 st_mtime_nsec;
compat_time_t st_ctime;
u32 st_ctime_nsec;
s32 st_blksize;
s32 st_blocks;
u32 __unused1; /* ACL stuff */
compat_dev_t __unused2; /* network */
compat_ino_t __unused3; /* network */
u32 __unused4; /* cnodes */
u16 __unused5; /* netsite */
short st_fstype;
compat_dev_t st_realdev;
u16 st_basemode;
u16 st_spareshort;
__compat_uid32_t st_uid;
__compat_gid32_t st_gid;
u32 st_spare4[3];
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};
struct compat_statfs {
s32 f_type;
s32 f_bsize;
s32 f_blocks;
s32 f_bfree;
s32 f_bavail;
s32 f_files;
s32 f_ffree;
__kernel_fsid_t f_fsid;
s32 f_namelen;
s32 f_frsize;
s32 f_spare[5];
};
struct compat_sigcontext {
compat_int_t sc_flags;
compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */
u64 sc_fr[32];
compat_int_t sc_iasq[2];
compat_int_t sc_iaoq[2];
compat_int_t sc_sar; /* cr11 */
};
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
typedef u32 compat_uptr_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
static __inline__ void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = &current->thread.regs;
return (void __user *)regs->gr[30];
}
static inline int __is_compat_task(struct task_struct *t)
{
return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
}
static inline int is_compat_task(void)
{
return __is_compat_task(current);
}
#endif /* _ASM_PARISC_COMPAT_H */


@@ -0,0 +1,50 @@
#include <linux/compat.h>
#include <linux/compat_siginfo.h>
#include <asm/compat_ucontext.h>
#ifndef _ASM_PARISC_COMPAT_RT_SIGFRAME_H
#define _ASM_PARISC_COMPAT_RT_SIGFRAME_H
/* In a deft move of uber-hackery, we decide to carry the top half of all
* 64-bit registers in a non-portable, non-ABI, hidden structure.
* Userspace can read the hidden structure if it *wants*, but the structure
* is never guaranteed to be in the same place. In fact the uc_sigmask from
* the ucontext_t structure may push the hidden register file downwards.
*/
struct compat_regfile {
/* Upper half of all the 64-bit registers that were truncated
on a copy to a 32-bit userspace */
compat_int_t rf_gr[32];
compat_int_t rf_iasq[2];
compat_int_t rf_iaoq[2];
compat_int_t rf_sar;
};
#define COMPAT_SIGRETURN_TRAMP 4
#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + COMPAT_SIGRESTARTBLOCK_TRAMP)
struct compat_rt_sigframe {
/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
Secondary to that it must protect the ERESTART_RESTARTBLOCK
trampoline we left on the stack (we were bad and didn't
change sp so we could run really fast.) */
compat_uint_t tramp[COMPAT_TRAMP_SIZE];
compat_siginfo_t info;
struct compat_ucontext uc;
/* Hidden location of truncated registers, *must* be last. */
struct compat_regfile regs;
};
/*
* The 32-bit ABI wants at least 48 bytes for a function call frame:
* 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
* which Linux/parisc uses is sp-20 for the saved return pointer...)
* Then, the stack pointer must be rounded to a cache line (64 bytes).
*/
#define SIGFRAME32 64
#define FUNCTIONCALLFRAME32 48
#define PARISC_RT_SIGFRAME_SIZE32 \
(((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
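/*
 * Worked example with a purely hypothetical sizeof(struct compat_rt_sigframe)
 * of 676 bytes: (676 + 48 + 64) & -64 == 768, i.e. the frame is rounded up
 * to the next 64-byte boundary.
 */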
#endif


@@ -0,0 +1,2 @@
/* Use generic */
#include <asm-generic/compat_signal.h>


@@ -0,0 +1,17 @@
#ifndef _ASM_PARISC_COMPAT_UCONTEXT_H
#define _ASM_PARISC_COMPAT_UCONTEXT_H
#include <linux/compat.h>
/* 32-bit ucontext as seen from a 64-bit kernel */
struct compat_ucontext {
compat_uint_t uc_flags;
compat_uptr_t uc_link;
compat_stack_t uc_stack; /* struct compat_sigaltstack (12 bytes)*/
/* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
compat_uint_t pad[1];
struct compat_sigcontext uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif /* !_ASM_PARISC_COMPAT_UCONTEXT_H */


@@ -0,0 +1,6 @@
#ifndef __PARISC_CPUTIME_H
#define __PARISC_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __PARISC_CPUTIME_H */


@@ -0,0 +1,15 @@
#ifndef _PARISC_CURRENT_H
#define _PARISC_CURRENT_H
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct * get_current(void)
{
return current_thread_info()->task;
}
#define current get_current()
#endif /* !(_PARISC_CURRENT_H) */


@@ -0,0 +1,43 @@
#ifndef _PARISC_DELAY_H
#define _PARISC_DELAY_H
#include <asm/system.h> /* for mfctl() */
#include <asm/processor.h> /* for boot_cpu_data */
/*
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines
*/
static __inline__ void __delay(unsigned long loops) {
asm volatile(
" .balignl 64,0x34000034\n"
" addib,UV -1,%0,.\n"
" nop\n"
: "=r" (loops) : "0" (loops));
}
static __inline__ void __cr16_delay(unsigned long clocks) {
unsigned long start;
/*
* Note: Due to unsigned math, cr16 rollovers shouldn't be
* a problem here. However, on 32 bit, we need to make sure
* we don't pass in too big a value. The current default
* value of MAX_UDELAY_MS should help prevent this.
*/
start = mfctl(16);
while ((mfctl(16) - start) < clocks)
;
}
static __inline__ void __udelay(unsigned long usecs) {
__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
}
#define udelay(n) __udelay(n)
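/*
 * Example with a purely hypothetical clock rate: if boot_cpu_data.cpu_hz is
 * 440 MHz, cr16 advances 440 ticks per microsecond, so udelay(10) spins
 * until roughly 4400 cr16 ticks have elapsed.
 */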
#endif /* defined(_PARISC_DELAY_H) */


@@ -0,0 +1,7 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>


@@ -0,0 +1 @@
#include <asm-generic/div64.h>


@@ -0,0 +1,253 @@
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
/* See Documentation/DMA-mapping.txt */
struct hppa_dma_ops {
int (*dma_supported)(struct device *dev, u64 mask);
void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
};
/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different coherent dma models with one binary (they will
** someday be loadable modules):
** I/O MMU consistent method dma_sync behavior
** ============= ====================== =======================
** a) PA-7x00LC uncachable host memory flush/purge
** b) U2/Uturn cachable host memory NOP
** c) Ike/Astro cachable host memory NOP
** d) EPIC/SAGA memory on EPIC/SAGA flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (eg PCX-T workstations) that don't fall into the above
** categories will need to modify the needed drivers to perform
** flush/purge and allocate "regular" cacheable pages for everything.
*/
#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
#endif
extern struct hppa_dma_ops *hppa_dma_ops;
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}
static inline void
dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}
static inline void
dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
return hppa_dma_ops->map_single(dev, ptr, size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
return hppa_dma_ops->map_sg(dev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
dma_unmap_single(dev, dma_address, size, direction);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_single_for_cpu)
hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_single_for_device)
hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_single_for_cpu)
hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_single_for_device)
hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_sg_for_cpu)
hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_sg_for_device)
hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
return hppa_dma_ops->dma_supported(dev, mask);
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
if(!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline int
dma_get_cache_alignment(void)
{
return dcache_stride;
}
static inline int
dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_single_for_cpu)
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
static inline void *
parisc_walk_tree(struct device *dev)
{
struct device *otherdev;
if(likely(dev->platform_data != NULL))
return dev->platform_data;
/* OK, just traverse the bus to find it */
for(otherdev = dev->parent; otherdev;
otherdev = otherdev->parent) {
if(otherdev->platform_data) {
dev->platform_data = otherdev->platform_data;
break;
}
}
BUG_ON(!dev->platform_data);
return dev->platform_data;
}
#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu);
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void * ccio_get_iommu(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
struct resource *res, unsigned long size,
unsigned long min, unsigned long max, unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
allocate_resource(&iomem_resource, res, size, min, max, \
align, NULL, NULL)
#endif /* !CONFIG_IOMMU_CCIO */
#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void * sba_get_iommu(struct parisc_device *dev);
#endif
/* At the moment, we panic on error for IOMMU resource exhaustion */
#define dma_mapping_error(dev, x) 0
#endif
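/*
 * Illustrative sketch only (not part of dma-mapping.h): the usual streaming
 * DMA pattern a driver would follow with the wrappers above. Each call simply
 * dispatches through the hppa_dma_ops vector installed for the platform
 * (pcxl_dma_ops, pcx_dma_ops, or an IOMMU implementation). 'dev', 'buf' and
 * 'len' are assumed to come from the caller; DMA_TO_DEVICE comes from
 * <linux/dma-mapping.h>, which a real driver would include.
 */
static inline int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* never taken here: the macro above is 0 */

	/* ... start the transfer using 'handle' as the bus address ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}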

View File

@@ -0,0 +1,186 @@
/* $Id: dma.h,v 1.2 1999/04/27 00:46:18 deller Exp $
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
* (c) Copyright 2000, Grant Grundler
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/io.h> /* need byte IO */
#include <asm/system.h>
#define dma_outb outb
#define dma_inb inb
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
/* The maximum address that we can perform a DMA transfer to on this platform
** New dynamic DMA interfaces should obsolete this....
*/
#define MAX_DMA_ADDRESS (~0UL)
/*
** We don't have DMA channels... well V-class does but the
** Dynamic DMA Mapping interface will support them... right? :^)
** Note: this is not relevant right now for PA-RISC, but we cannot
** leave this as undefined because some things (e.g. sound)
** won't compile :-(
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG)
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
static __inline__ unsigned long claim_dma_lock(void)
{
return 0;
}
static __inline__ void release_dma_lock(unsigned long flags)
{
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
/* using short to get 16-bit wrap around */
unsigned short count;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr<=3)? count : (count<<1);
}
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
if (dmanr<=3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
#endif
}
static __inline__ void disable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
if (dmanr<=3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
#endif
}
/* reserve a DMA channel */
#define request_dma(dmanr, device_id) (0)
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
}
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
}
#define free_dma(dmanr)
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */
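/*
 * Illustrative sketch only (not part of dma.h): polling an 8237-style channel
 * until its residue counter drains, using the get_dma_residue() helper above
 * (which reads the 16-bit count low byte first, then high byte, and doubles
 * the result for the 16-bit channels on the second controller).
 */
static inline void example_wait_dma_done(unsigned int chan)
{
	/* Busy-wait; a real driver would add a timeout or sleep instead. */
	while (get_dma_residue(chan) != 0)
		;
}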

View File

@@ -0,0 +1,23 @@
/*
* eisa_bus.h interface between the eisa BA driver and the bus enumerator
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
*
*/
#ifndef ASM_EISA_H
#define ASM_EISA_H
extern void eisa_make_irq_level(int num);
extern void eisa_make_irq_edge(int num);
extern int eisa_enumerator(unsigned long eeprom_addr,
struct resource *io_parent,
struct resource *mem_parent);
extern int eisa_eeprom_init(unsigned long addr);
#endif

View File

@@ -0,0 +1,153 @@
/*
* eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com>
*
*/
#ifndef ASM_EISA_EEPROM_H
#define ASM_EISA_EEPROM_H
extern void __iomem *eisa_eeprom_addr;
#define HPEE_MAX_LENGTH 0x2000 /* maximum eeprom length */
#define HPEE_SLOT_INFO(slot) (20+(48*slot))
struct eeprom_header
{
u_int32_t num_writes; /* number of writes */
u_int8_t flags; /* flags, usage? */
u_int8_t ver_maj;
u_int8_t ver_min;
u_int8_t num_slots; /* number of EISA slots in system */
	u_int16_t csum; /* checksum, I don't know how to calculate this */
u_int8_t pad[10];
} __attribute__ ((packed));
struct eeprom_eisa_slot_info
{
u_int32_t eisa_slot_id;
u_int32_t config_data_offset;
u_int32_t num_writes;
u_int16_t csum;
u_int16_t num_functions;
u_int16_t config_data_length;
/* bits 0..3 are the duplicate slot id */
#define HPEE_SLOT_INFO_EMBEDDED 0x10
#define HPEE_SLOT_INFO_VIRTUAL 0x20
#define HPEE_SLOT_INFO_NO_READID 0x40
#define HPEE_SLOT_INFO_DUPLICATE 0x80
u_int8_t slot_info;
#define HPEE_SLOT_FEATURES_ENABLE 0x01
#define HPEE_SLOT_FEATURES_IOCHK 0x02
#define HPEE_SLOT_FEATURES_CFG_INCOMPLETE 0x80
u_int8_t slot_features;
u_int8_t ver_min;
u_int8_t ver_maj;
#define HPEE_FUNCTION_INFO_HAVE_TYPE 0x01
#define HPEE_FUNCTION_INFO_HAVE_MEMORY 0x02
#define HPEE_FUNCTION_INFO_HAVE_IRQ 0x04
#define HPEE_FUNCTION_INFO_HAVE_DMA 0x08
#define HPEE_FUNCTION_INFO_HAVE_PORT 0x10
#define HPEE_FUNCTION_INFO_HAVE_PORT_INIT 0x20
/* I think there are two slightly different
 * versions of the function_info field:
 * one in the fixed header and one optional
 * in the parsed slot data area */
#define HPEE_FUNCTION_INFO_HAVE_FUNCTION 0x01
#define HPEE_FUNCTION_INFO_F_DISABLED 0x80
#define HPEE_FUNCTION_INFO_CFG_FREE_FORM 0x40
u_int8_t function_info;
#define HPEE_FLAG_BOARD_IS_ISA 0x01 /* flag and minor version for isa board */
u_int8_t flags;
u_int8_t pad[24];
} __attribute__ ((packed));
#define HPEE_MEMORY_MAX_ENT 9
/* memory descriptor: byte 0 */
#define HPEE_MEMORY_WRITABLE 0x01
#define HPEE_MEMORY_CACHABLE 0x02
#define HPEE_MEMORY_TYPE_MASK 0x18
#define HPEE_MEMORY_TYPE_SYS 0x00
#define HPEE_MEMORY_TYPE_EXP 0x08
#define HPEE_MEMORY_TYPE_VIR 0x10
#define HPEE_MEMORY_TYPE_OTH 0x18
#define HPEE_MEMORY_SHARED 0x20
#define HPEE_MEMORY_MORE 0x80
/* memory descriptor: byte 1 */
#define HPEE_MEMORY_WIDTH_MASK 0x03
#define HPEE_MEMORY_WIDTH_BYTE 0x00
#define HPEE_MEMORY_WIDTH_WORD 0x01
#define HPEE_MEMORY_WIDTH_DWORD 0x02
#define HPEE_MEMORY_DECODE_MASK 0x0c
#define HPEE_MEMORY_DECODE_20BITS 0x00
#define HPEE_MEMORY_DECODE_24BITS 0x04
#define HPEE_MEMORY_DECODE_32BITS 0x08
/* bytes 2 and 3 are a 16-bit LE value
 * containing the memory size in kilobytes */
/* bytes 4, 5 and 6 are a 24-bit LE value
 * containing the memory base address */
#define HPEE_IRQ_MAX_ENT 7
/* Interrupt entry: byte 0 */
#define HPEE_IRQ_CHANNEL_MASK 0xf
#define HPEE_IRQ_TRIG_LEVEL 0x20
#define HPEE_IRQ_MORE 0x80
/* byte 1 seems to be unused */
#define HPEE_DMA_MAX_ENT 4
/* dma entry: byte 0 */
#define HPEE_DMA_CHANNEL_MASK 7
#define HPEE_DMA_SIZE_MASK 0xc
#define HPEE_DMA_SIZE_BYTE 0x0
#define HPEE_DMA_SIZE_WORD 0x4
#define HPEE_DMA_SIZE_DWORD 0x8
#define HPEE_DMA_SHARED 0x40
#define HPEE_DMA_MORE 0x80
/* dma entry: byte 1 */
#define HPEE_DMA_TIMING_MASK 0x30
#define HPEE_DMA_TIMING_ISA 0x0
#define HPEE_DMA_TIMING_TYPEA 0x10
#define HPEE_DMA_TIMING_TYPEB 0x20
#define HPEE_DMA_TIMING_TYPEC 0x30
#define HPEE_PORT_MAX_ENT 20
/* port entry byte 0 */
#define HPEE_PORT_SIZE_MASK 0x1f
#define HPEE_PORT_SHARED 0x40
#define HPEE_PORT_MORE 0x80
/* bytes 1 and 2 are a 16-bit LE value
 * containing the start port number */
#define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */
/* port init entry byte 0 */
#define HPEE_PORT_INIT_WIDTH_MASK 0x3
#define HPEE_PORT_INIT_WIDTH_BYTE 0x0
#define HPEE_PORT_INIT_WIDTH_WORD 0x1
#define HPEE_PORT_INIT_WIDTH_DWORD 0x2
#define HPEE_PORT_INIT_MASK 0x4
#define HPEE_PORT_INIT_MORE 0x80
#define HPEE_SELECTION_MAX_ENT 26
#define HPEE_TYPE_MAX_LEN 80
#endif
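/*
 * Illustrative sketch only (not part of eisa_eeprom.h): decoding one memory
 * descriptor from the parsed slot data with the bit masks above. The layout
 * follows the comments: byte 0 holds the flags, byte 1 the width/decode bits,
 * bytes 2-3 a little-endian size in kilobytes and bytes 4-6 a little-endian
 * 24-bit base address. 'd' is assumed to point at byte 0 of the descriptor,
 * and the struct below is purely hypothetical.
 */
struct example_eeprom_mem {
	int writable;
	int more;		/* another descriptor follows */
	unsigned int size_kb;
	unsigned int base;
};

static inline void example_decode_mem(const unsigned char *d,
				      struct example_eeprom_mem *m)
{
	m->writable = (d[0] & HPEE_MEMORY_WRITABLE) != 0;
	m->more     = (d[0] & HPEE_MEMORY_MORE) != 0;
	m->size_kb  = d[2] | (d[3] << 8);			/* 16-bit LE */
	m->base     = d[4] | (d[5] << 8) | (d[6] << 16);	/* 24-bit LE */
}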

View File

@@ -0,0 +1,342 @@
#ifndef __ASMPARISC_ELF_H
#define __ASMPARISC_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#define EM_PARISC 15
/* HPPA specific definitions. */
/* Legal values for e_flags field of Elf32_Ehdr. */
#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
prediction. */
#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
/* Additional section indices. */
#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
symbols in ANSI C. */
#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
/* Legal values for sh_type field of Elf32_Shdr. */
#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
/* Legal values for sh_flags field of Elf32_Shdr. */
#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
/* Legal values for ST_TYPE subfield of st_info (symbol type). */
#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
#define STT_HP_OPAQUE (STT_LOOS + 0x1)
#define STT_HP_STUB (STT_LOOS + 0x2)
/* HPPA relocs. */
#define R_PARISC_NONE 0 /* No reloc. */
#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
#define R_PARISC_FPTR64 64 /* 64 bits function address. */
#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
#define R_PARISC_LORESERVE 128
#define R_PARISC_COPY 128 /* Copy relocation. */
#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
#define R_PARISC_HIRESERVE 255
#define PA_PLABEL_FDESC 0x02 /* bit set if PLABEL points to
* a function descriptor, not
* an address */
/* The following are PA function descriptors
*
* addr: the absolute address of the function
 * gp: either the data pointer (r27) for non-PIC code or the
 * PLT pointer (r19) for PIC code */
/* Format for the Elf32 Function descriptor */
typedef struct elf32_fdesc {
__u32 addr;
__u32 gp;
} Elf32_Fdesc;
/* Format for the Elf64 Function descriptor */
typedef struct elf64_fdesc {
__u64 dummy[2]; /* FIXME: nothing uses these, why waste
* the space */
__u64 addr;
__u64 gp;
} Elf64_Fdesc;
/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
#define PT_HP_TLS (PT_LOOS + 0x0)
#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
#define PT_HP_PARALLEL (PT_LOOS + 0x10)
#define PT_HP_FASTBIND (PT_LOOS + 0x11)
#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
#define PT_HP_STACK (PT_LOOS + 0x14)
#define PT_PARISC_ARCHEXT 0x70000000
#define PT_PARISC_UNWIND 0x70000001
/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
#define PF_PARISC_SBP 0x08000000
#define PF_HP_PAGE_SIZE 0x00100000
#define PF_HP_FAR_SHARED 0x00200000
#define PF_HP_NEAR_SHARED 0x00400000
#define PF_HP_CODE 0x01000000
#define PF_HP_MODIFY 0x02000000
#define PF_HP_LAZYSWAP 0x04000000
#define PF_HP_SBP 0x08000000
/*
* The following definitions are those for 32-bit ELF binaries on a 32-bit
* kernel and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries
* on a 64-bit kernel, arch/parisc/kernel/binfmt_elf32.c defines these
* macros appropriately and then #includes binfmt_elf.c, which then includes
* this file.
*/
#ifndef ELF_CLASS
/*
* This is used to ensure we don't load something for the wrong architecture.
*
* Note that this header file is used by default in fs/binfmt_elf.c. So
* the following macros are for the default case. However, for the 64
* bit kernel we also support 32 bit parisc binaries. To do that
* arch/parisc/kernel/binfmt_elf32.c defines its own set of these
* macros, and then it includes fs/binfmt_elf.c to provide an alternate
* elf binary handler for 32 bit binaries (on the 64 bit kernel).
*/
#ifdef CONFIG_64BIT
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif
typedef unsigned long elf_greg_t;
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM ("PARISC\0")
#define SET_PERSONALITY(ex) \
current->personality = PER_LINUX; \
current->thread.map_base = DEFAULT_MAP_BASE; \
current->thread.task_size = DEFAULT_TASK_SIZE \
/*
* Fill in general registers in a core dump. This saves pretty
* much the same registers as hp-ux, although in a different order.
* Registers marked # below are not currently saved in pt_regs, so
* we use their current values here.
*
* gr0..gr31
* sr0..sr7
* iaoq0..iaoq1
* iasq0..iasq1
* cr11 (sar)
* cr19 (iir)
* cr20 (isr)
* cr21 (ior)
* # cr22 (ipsw)
* # cr0 (recovery counter)
* # cr24..cr31 (temporary registers)
* # cr8,9,12,13 (protection IDs)
* # cr10 (scr/ccr)
* # cr15 (ext int enable mask)
*
*/
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \
memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \
memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \
memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \
dst[44] = pt->sar; dst[45] = pt->iir; \
dst[46] = pt->isr; dst[47] = pt->ior; \
dst[48] = mfctl(22); dst[49] = mfctl(0); \
dst[50] = mfctl(24); dst[51] = mfctl(25); \
dst[52] = mfctl(26); dst[53] = mfctl(27); \
dst[54] = mfctl(28); dst[55] = mfctl(29); \
dst[56] = mfctl(30); dst[57] = mfctl(31); \
dst[58] = mfctl( 8); dst[59] = mfctl( 9); \
dst[60] = mfctl(12); dst[61] = mfctl(13); \
dst[62] = mfctl(10); dst[63] = mfctl(15);
#endif /* ! ELF_CLASS */
#define ELF_NGREG 80 /* We only need 64 at present, but leave space
for expansion. */
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#define ELF_NFPREG 32
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
struct task_struct;
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
struct pt_regs; /* forward declaration... */
#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_PARISC
#define ELF_OSABI ELFOSABI_LINUX
/* %r23 is set by ld.so to a pointer to a function which might be
registered using atexit. This provides a means for the dynamic
linker to call DT_FINI functions for shared libraries that have
been loaded before the code runs.
So that we can use the same startup file with static executables,
we start programs with a value of 0 to indicate that there is no
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk.
(2 * TASK_SIZE / 3) turns into something undefined when run through a
32 bit preprocessor and in some cases results in the kernel trying to map
ld.so to the kernel virtual base. Use a sane value instead. /Jes
*/
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP 0
#endif
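/*
 * Illustrative sketch only (not part of elf.h): on PA-RISC a function
 * "pointer" may really be a plabel. Per the PA_PLABEL_FDESC comment above,
 * if that bit is set the value (with the flag masked off) points at a
 * function descriptor, and the real entry address lives in its 'addr' field.
 * This is a simplified, 32-bit-only sketch of that resolution.
 */
static inline unsigned long example_resolve_plabel32(unsigned long ptr)
{
	if (ptr & PA_PLABEL_FDESC) {
		Elf32_Fdesc *fdesc = (Elf32_Fdesc *)(ptr & ~PA_PLABEL_FDESC);
		return fdesc->addr;	/* absolute address of the function */
	}
	return ptr;			/* already a plain code address */
}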

View File

@@ -0,0 +1,6 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */

View File

@@ -0,0 +1,124 @@
#ifndef _PARISC_ERRNO_H
#define _PARISC_ERRNO_H
#include <asm-generic/errno-base.h>
#define ENOMSG 35 /* No message of desired type */
#define EIDRM 36 /* Identifier removed */
#define ECHRNG 37 /* Channel number out of range */
#define EL2NSYNC 38 /* Level 2 not synchronized */
#define EL3HLT 39 /* Level 3 halted */
#define EL3RST 40 /* Level 3 reset */
#define ELNRNG 41 /* Link number out of range */
#define EUNATCH 42 /* Protocol driver not attached */
#define ENOCSI 43 /* No CSI structure available */
#define EL2HLT 44 /* Level 2 halted */
#define EDEADLK 45 /* Resource deadlock would occur */
#define EDEADLOCK EDEADLK
#define ENOLCK 46 /* No record locks available */
#define EILSEQ 47 /* Illegal byte sequence */
#define ENONET 50 /* Machine is not on the network */
#define ENODATA 51 /* No data available */
#define ETIME 52 /* Timer expired */
#define ENOSR 53 /* Out of streams resources */
#define ENOSTR 54 /* Device not a stream */
#define ENOPKG 55 /* Package not installed */
#define ENOLINK 57 /* Link has been severed */
#define EADV 58 /* Advertise error */
#define ESRMNT 59 /* Srmount error */
#define ECOMM 60 /* Communication error on send */
#define EPROTO 61 /* Protocol error */
#define EMULTIHOP 64 /* Multihop attempted */
#define EDOTDOT 66 /* RFS specific error */
#define EBADMSG 67 /* Not a data message */
#define EUSERS 68 /* Too many users */
#define EDQUOT 69 /* Quota exceeded */
#define ESTALE 70 /* Stale NFS file handle */
#define EREMOTE 71 /* Object is remote */
#define EOVERFLOW 72 /* Value too large for defined data type */
/* these errnos are defined by Linux but not HPUX. */
#define EBADE 160 /* Invalid exchange */
#define EBADR 161 /* Invalid request descriptor */
#define EXFULL 162 /* Exchange full */
#define ENOANO 163 /* No anode */
#define EBADRQC 164 /* Invalid request code */
#define EBADSLT 165 /* Invalid slot */
#define EBFONT 166 /* Bad font file format */
#define ENOTUNIQ 167 /* Name not unique on network */
#define EBADFD 168 /* File descriptor in bad state */
#define EREMCHG 169 /* Remote address changed */
#define ELIBACC 170 /* Can not access a needed shared library */
#define ELIBBAD 171 /* Accessing a corrupted shared library */
#define ELIBSCN 172 /* .lib section in a.out corrupted */
#define ELIBMAX 173 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 174 /* Cannot exec a shared library directly */
#define ERESTART 175 /* Interrupted system call should be restarted */
#define ESTRPIPE 176 /* Streams pipe error */
#define EUCLEAN 177 /* Structure needs cleaning */
#define ENOTNAM 178 /* Not a XENIX named type file */
#define ENAVAIL 179 /* No XENIX semaphores available */
#define EISNAM 180 /* Is a named type file */
#define EREMOTEIO 181 /* Remote I/O error */
#define ENOMEDIUM 182 /* No medium found */
#define EMEDIUMTYPE 183 /* Wrong medium type */
#define ENOKEY 184 /* Required key not available */
#define EKEYEXPIRED 185 /* Key has expired */
#define EKEYREVOKED 186 /* Key has been revoked */
#define EKEYREJECTED 187 /* Key was rejected by service */
/* We now return you to your regularly scheduled HPUX. */
#define ENOSYM 215 /* symbol does not exist in executable */
#define ENOTSOCK 216 /* Socket operation on non-socket */
#define EDESTADDRREQ 217 /* Destination address required */
#define EMSGSIZE 218 /* Message too long */
#define EPROTOTYPE 219 /* Protocol wrong type for socket */
#define ENOPROTOOPT 220 /* Protocol not available */
#define EPROTONOSUPPORT 221 /* Protocol not supported */
#define ESOCKTNOSUPPORT 222 /* Socket type not supported */
#define EOPNOTSUPP 223 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 224 /* Protocol family not supported */
#define EAFNOSUPPORT 225 /* Address family not supported by protocol */
#define EADDRINUSE 226 /* Address already in use */
#define EADDRNOTAVAIL 227 /* Cannot assign requested address */
#define ENETDOWN 228 /* Network is down */
#define ENETUNREACH 229 /* Network is unreachable */
#define ENETRESET 230 /* Network dropped connection because of reset */
#define ECONNABORTED 231 /* Software caused connection abort */
#define ECONNRESET 232 /* Connection reset by peer */
#define ENOBUFS 233 /* No buffer space available */
#define EISCONN 234 /* Transport endpoint is already connected */
#define ENOTCONN 235 /* Transport endpoint is not connected */
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */
#define EALREADY 244 /* Operation already in progress */
#define EINPROGRESS 245 /* Operation now in progress */
#define EWOULDBLOCK 246 /* Operation would block (Linux returns EAGAIN) */
#define ENOTEMPTY 247 /* Directory not empty */
#define ENAMETOOLONG 248 /* File name too long */
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before completion (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
/* for robust mutexes */
#define EOWNERDEAD 254 /* Owner died */
#define ENOTRECOVERABLE 255 /* State not recoverable */
#endif

View File

@@ -0,0 +1,19 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */

View File

@@ -0,0 +1,39 @@
#ifndef _PARISC_FCNTL_H
#define _PARISC_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on block devices and on files
located on an ext2 file system */
#define O_APPEND 000000010
#define O_BLKSEEK 000000100 /* HPUX only */
#define O_CREAT 000000400 /* not fcntl */
#define O_EXCL 000002000 /* not fcntl */
#define O_LARGEFILE 000004000
#define O_SYNC 000100000
#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */
#define O_NOCTTY 000400000 /* not fcntl */
#define O_DSYNC 001000000 /* HPUX only */
#define O_RSYNC 002000000 /* HPUX only */
#define O_NOATIME 004000000
#define O_CLOEXEC 010000000 /* set close_on_exec */
#define O_DIRECTORY 000010000 /* must be a directory */
#define O_NOFOLLOW 000000200 /* don't follow links */
#define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */
#define F_GETLK64 8
#define F_SETLK64 9
#define F_SETLKW64 10
#define F_GETOWN 11 /* for sockets. */
#define F_SETOWN 12 /* for sockets. */
#define F_SETSIG 13 /* for sockets. */
#define F_GETSIG 14 /* for sockets. */
/* for posix fcntl() and lockf() */
#define F_RDLCK 01
#define F_WRLCK 02
#define F_UNLCK 03
#include <asm-generic/fcntl.h>
#endif

View File

@@ -0,0 +1,30 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
/*
* This file defines the locations of the fixed mappings on parisc.
*
* All of the values in this file are machine virtual addresses.
*
* All of the values in this file must be <4GB (because of assembly
* loading restrictions). If you place this region anywhere above
* __PAGE_OFFSET, you must adjust the memory map accordingly */
/* The alias region is used in kernel space to do copy/clear to or
* from areas congruently mapped with user space. It is 8MB large
* and must be 16MB aligned */
#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024)
/* This is the kernel area for all maps (vmalloc, dma etc.) most
* usually, it extends up to TMPALIAS_MAP_START. Virtual addresses
* 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
#define KERNEL_MAP_START (GATEWAY_PAGE_SIZE)
#define KERNEL_MAP_END (TMPALIAS_MAP_START)
#ifndef __ASSEMBLY__
extern void *vmalloc_start;
#define PCXL_DMA_MAP_SIZE (8*1024*1024)
#define VMALLOC_START ((unsigned long)vmalloc_start)
#define VMALLOC_END (KERNEL_MAP_END)
#endif /*__ASSEMBLY__*/
#endif /*_ASM_FIXMAP_H*/

View File

@@ -0,0 +1,271 @@
/* Architecture specific parts of the Floppy driver
*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 2000 Matthew Wilcox (willy a debian . org)
* Copyright (C) 2000 Dave Kennedy
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_PARISC_FLOPPY_H
#define __ASM_PARISC_FLOPPY_H
#include <linux/vmalloc.h>
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
#define SW fd_routine[use_virtual_dma&1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) readb(port)
#define fd_outb(value, port) writeb(value, port)
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count=0;
static int virtual_dma_residue=0;
static char *virtual_dma_addr=0;
static int virtual_dma_mode=0;
static int doing_pdma=0;
static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
{
register unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
static int calls=0;
static int bytes=0;
static int dma_wait=0;
#endif
if (!doing_pdma) {
floppy_interrupt(irq, dev_id, regs);
return;
}
#ifdef TRACE_FLPY_INT
if(!calls)
bytes = virtual_dma_count;
#endif
{
register int lcount;
register char *lptr = virtual_dma_addr;
for (lcount = virtual_dma_count; lcount; lcount--) {
st = fd_inb(virtual_dma_port+4) & 0xa0 ;
if (st != 0xa0)
break;
if (virtual_dma_mode) {
fd_outb(*lptr, virtual_dma_port+5);
} else {
*lptr = fd_inb(virtual_dma_port+5);
}
lptr++;
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
st = fd_inb(virtual_dma_port+4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
if (st == 0x20)
return;
if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
dma_wait=0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id, regs);
return;
}
#ifdef TRACE_FLPY_INT
if (!virtual_dma_count)
dma_wait++;
#endif
}
static void fd_disable_dma(void)
{
if(! (can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
virtual_dma_count=0;
}
static int vdma_request_dma(unsigned int dmanr, const char * device_id)
{
return 0;
}
static void vdma_nop(unsigned int dummy)
{
}
static int vdma_get_dma_residue(unsigned int dummy)
{
return virtual_dma_count + virtual_dma_residue;
}
static int fd_request_irq(void)
{
if(can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
IRQF_DISABLED, "floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
IRQF_DISABLED, "floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
{
return __get_dma_pages(GFP_KERNEL, get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long) vmalloc(size);
}
#define nodma_mem_alloc(size) vdma_mem_alloc(size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
if((unsigned int) addr >= (unsigned int) high_memory)
return vfree((void *)addr);
else
free_pages(addr, get_order(size));
}
#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if(can_use_virtual_dma == 2) {
if((unsigned int) addr >= (unsigned int) high_memory ||
virt_to_bus(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
} else {
use_virtual_dma = can_use_virtual_dma & 1;
}
}
#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
return 0;
}
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
if (CROSS_64KB(addr, size)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
#endif
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA,mode);
set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
set_dma_count(FLOPPY_DMA,size);
enable_dma(FLOPPY_DMA);
return 0;
}
static struct fd_routine_l {
int (*_request_dma)(unsigned int dmanr, const char * device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
unsigned long (*_dma_mem_alloc) (unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
request_dma,
free_dma,
get_dma_residue,
dma_mem_alloc,
hard_dma_setup
},
{
vdma_request_dma,
vdma_nop,
vdma_get_dma_residue,
vdma_mem_alloc,
vdma_dma_setup
}
};
static int FDC1 = 0x3f0; /* Lies. Floppy controller is memory mapped, not io mapped */
static int FDC2 = -1;
#define FLOPPY0_TYPE 0
#define FLOPPY1_TYPE 0
#define N_FDC 1
#define N_DRIVE 8
#define EXTRA_FLOPPY_PARAMS
#endif /* __ASM_PARISC_FLOPPY_H */
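/*
 * Illustrative sketch only (not part of floppy.h): the SW macro above indexes
 * fd_routine[] with (use_virtual_dma & 1), so the same fd_dma_setup() call
 * either programs the real 8237 (hard_dma_setup) or arms the pseudo-DMA
 * byte-banging path (vdma_dma_setup) serviced by floppy_hardint(). A
 * hypothetical caller looks the same either way; 'buf', 'len' and 'io' are
 * assumed to come from the floppy core.
 */
static inline int example_start_floppy_read(char *buf, unsigned long len, int io)
{
	return fd_dma_setup(buf, len, DMA_MODE_READ, io);	/* dispatches via SW */
}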

View File

@@ -0,0 +1,77 @@
#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
case FUTEX_OP_ADD:
case FUTEX_OP_OR:
case FUTEX_OP_ANDN:
case FUTEX_OP_XOR:
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
/* Non-atomic version */
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
int err = 0;
int uval;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
*/
if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
return -EFAULT;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
err = get_user(uval, uaddr);
if (err) return -EFAULT;
if (uval == oldval)
err = put_user(newval, uaddr);
if (err) return -EFAULT;
return uval;
}
#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/
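/*
 * Illustrative sketch only (not part of futex.h): the calling convention the
 * (non-atomic) cmpxchg helper above implements. The caller passes the value
 * it expects; on return it gets the value that was actually read, and the
 * store happened only if the two matched, while -EFAULT reports an
 * inaccessible user address. 'uaddr' is assumed to be a valid user pointer
 * supplied by a hypothetical caller.
 */
static inline int example_try_lock_futex(int __user *uaddr)
{
	int old = futex_atomic_cmpxchg_inatomic(uaddr, 0, 1);

	if (old == -EFAULT)
		return -EFAULT;		/* user address was not accessible */
	return old == 0;		/* 1 if we observed 0 and stored 1 */
}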

View File

@@ -0,0 +1,113 @@
/* Architecture specific parts of HP's STI (framebuffer) driver.
* Structures are HP-UX compatible for XFree86 usage.
*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 2001 Helge Deller (deller a parisc-linux org)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_PARISC_GRFIOCTL_H
#define __ASM_PARISC_GRFIOCTL_H
/* upper 32 bits of graphics id (HP/UX identifier) */
#define GRFGATOR 8
#define S9000_ID_S300 9
#define GRFBOBCAT 9
#define GRFCATSEYE 9
#define S9000_ID_98720 10
#define GRFRBOX 10
#define S9000_ID_98550 11
#define GRFFIREEYE 11
#define S9000_ID_A1096A 12
#define GRFHYPERION 12
#define S9000_ID_FRI 13
#define S9000_ID_98730 14
#define GRFDAVINCI 14
#define S9000_ID_98705 0x26C08070 /* Tigershark */
#define S9000_ID_98736 0x26D148AB
#define S9000_ID_A1659A 0x26D1482A /* CRX 8 plane color (=ELK) */
#define S9000_ID_ELK S9000_ID_A1659A
#define S9000_ID_A1439A 0x26D148EE /* CRX24 = CRX+ (24-plane color) */
#define S9000_ID_A1924A 0x26D1488C /* GRX gray-scale */
#define S9000_ID_ELM S9000_ID_A1924A
#define S9000_ID_98765 0x27480DEF
#define S9000_ID_ELK_768 0x27482101
#define S9000_ID_STINGER 0x27A4A402
#define S9000_ID_TIMBER 0x27F12392 /* Bushmaster (710) Graphics */
#define S9000_ID_TOMCAT 0x27FCCB6D /* dual-headed ELK (Dual CRX) */
#define S9000_ID_ARTIST 0x2B4DED6D /* Artist (Gecko/712 & 715) onboard Graphics */
#define S9000_ID_HCRX 0x2BCB015A /* Hyperdrive/Hyperbowl (A4071A) Graphics */
#define CRX24_OVERLAY_PLANES 0x920825AA /* Overlay planes on CRX24 */
#define CRT_ID_ELK_1024 S9000_ID_ELK_768 /* Elk 1024x768 CRX */
#define CRT_ID_ELK_1280 S9000_ID_A1659A /* Elk 1280x1024 CRX */
#define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */
#define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */
#define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */
#define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti, A4450A (built-in B132+/B160L) */
#define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/
#define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/
#define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */
#define CRT_ID_CRX48Z S9000_ID_STINGER /* Stinger */
#define CRT_ID_DUAL_CRX S9000_ID_TOMCAT /* Tomcat */
#define CRT_ID_PVRX S9000_ID_98705 /* Tigershark */
#define CRT_ID_TIMBER S9000_ID_TIMBER /* Timber (710 builtin) */
#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
/* structure for ioctl(GCDESCRIBE) */
#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */
struct grf_fbinfo {
unsigned int id; /* upper 32 bits of graphics id */
unsigned int mapsize; /* mapped size of framebuffer */
unsigned int dwidth, dlength;/* x and y sizes */
unsigned int width, length; /* total x and total y size */
unsigned int xlen; /* x pitch size */
unsigned int bpp, bppu; /* bits per pixel and used bpp */
unsigned int npl, nplbytes; /* # of planes and bytes per plane */
char name[32]; /* name of the device (from ROM) */
unsigned int attr; /* attributes */
gaddr_t fbbase, regbase;/* framebuffer and register base addr */
gaddr_t regions[6]; /* region bases */
};
#define GCID _IOR('G', 0, int)
#define GCON _IO('G', 1)
#define GCOFF _IO('G', 2)
#define GCAON _IO('G', 3)
#define GCAOFF _IO('G', 4)
#define GCMAP _IOWR('G', 5, int)
#define GCUNMAP _IOWR('G', 6, int)
#define GCMAP_HPUX _IO('G', 5)
#define GCUNMAP_HPUX _IO('G', 6)
#define GCLOCK _IO('G', 7)
#define GCUNLOCK _IO('G', 8)
#define GCLOCK_MINIMUM _IO('G', 9)
#define GCUNLOCK_MINIMUM _IO('G', 10)
#define GCSTATIC_CMAP _IO('G', 11)
#define GCVARIABLE_CMAP _IO('G', 12)
#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */
#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo)
#define GCFASTLOCK _IO('G', 26)
#endif /* __ASM_PARISC_GRFIOCTL_H */
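/*
 * Illustrative sketch only (not part of grfioctl.h): how a user-space client
 * of this HP-UX compatible graphics interface might query a framebuffer with
 * the GCDESCRIBE ioctl defined above. Error handling is trimmed and
 * "/dev/fb" is a placeholder device node, not a guaranteed path.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	int example_describe_fb(void)
 *	{
 *		struct grf_fbinfo info;
 *		int fd = open("/dev/fb", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, GCDESCRIBE, &info) < 0)
 *			return -1;
 *		printf("%s: %ux%u, %u bpp\n", info.name,
 *		       info.dwidth, info.dlength, info.bpp);
 *		return 0;
 *	}
 */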

View File

@@ -0,0 +1,29 @@
/* hardirq.h: PA-RISC hard IRQ support.
*
* Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
*
* The locking is really quite interesting. There's a cpu-local
* count of how many interrupts are being handled, and a global
* lock. An interrupt can only be serviced if the global lock
* is free. You can't be sure no more interrupts are being
 * serviced until you've acquired the lock and then checked
 * that all the per-cpu interrupt counts are zero. It's a specialised
* br_lock, and that's exactly how Sparc does it. We don't because
* it's more locking for us. This way is lock-free in the interrupt path.
*/
#ifndef _PARISC_HARDIRQ_H
#define _PARISC_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned long __softirq_pending; /* set_bit is used on this */
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
void ack_bad_irq(unsigned int irq);
#endif /* _PARISC_HARDIRQ_H */

View File

@@ -0,0 +1,127 @@
#ifndef _PARISC_HARDWARE_H
#define _PARISC_HARDWARE_H
#include <linux/mod_devicetable.h>
#include <asm/pdc.h>
#define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID
#define HVERSION_ANY_ID PA_HVERSION_ANY_ID
#define HVERSION_REV_ANY_ID PA_HVERSION_REV_ANY_ID
#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
struct hp_hardware {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned long sversion:28;
unsigned short opt;
const char name[80]; /* The hardware description */
};
struct parisc_device;
enum cpu_type {
pcx = 0, /* pa7000 pa 1.0 */
pcxs = 1, /* pa7000 pa 1.1a */
pcxt = 2, /* pa7100 pa 1.1b */
pcxt_ = 3, /* pa7200 (t') pa 1.1c */
pcxl = 4, /* pa7100lc pa 1.1d */
pcxl2 = 5, /* pa7300lc pa 1.1e */
pcxu = 6, /* pa8000 pa 2.0 */
pcxu_ = 7, /* pa8200 (u+) pa 2.0 */
pcxw = 8, /* pa8500 pa 2.0 */
pcxw_ = 9, /* pa8600 (w+) pa 2.0 */
pcxw2 = 10, /* pa8700 pa 2.0 */
mako = 11, /* pa8800 pa 2.0 */
mako2 = 12 /* pa8900 pa 2.0 */
};
extern const char * const cpu_name_version[][2]; /* mapping from enum cpu_type to strings */
struct parisc_driver;
struct io_module {
volatile uint32_t nothing; /* reg 0 */
volatile uint32_t io_eim;
volatile uint32_t io_dc_adata;
volatile uint32_t io_ii_cdata;
volatile uint32_t io_dma_link; /* reg 4 */
volatile uint32_t io_dma_command;
volatile uint32_t io_dma_address;
volatile uint32_t io_dma_count;
volatile uint32_t io_flex; /* reg 8 */
volatile uint32_t io_spa_address;
volatile uint32_t reserved1[2];
volatile uint32_t io_command; /* reg 12 */
volatile uint32_t io_status;
volatile uint32_t io_control;
volatile uint32_t io_data;
volatile uint32_t reserved2; /* reg 16 */
volatile uint32_t chain_addr;
volatile uint32_t sub_mask_clr;
volatile uint32_t reserved3[13];
volatile uint32_t undefined[480];
volatile uint32_t unpriv[512];
};
struct bc_module {
volatile uint32_t unused1[12];
volatile uint32_t io_command;
volatile uint32_t io_status;
volatile uint32_t io_control;
volatile uint32_t unused2[1];
volatile uint32_t io_err_resp;
volatile uint32_t io_err_info;
volatile uint32_t io_err_req;
volatile uint32_t unused3[11];
volatile uint32_t io_io_low;
volatile uint32_t io_io_high;
};
#define HPHW_NPROC 0
#define HPHW_MEMORY 1
#define HPHW_B_DMA 2
#define HPHW_OBSOLETE 3
#define HPHW_A_DMA 4
#define HPHW_A_DIRECT 5
#define HPHW_OTHER 6
#define HPHW_BCPORT 7
#define HPHW_CIO 8
#define HPHW_CONSOLE 9
#define HPHW_FIO 10
#define HPHW_BA 11
#define HPHW_IOA 12
#define HPHW_BRIDGE 13
#define HPHW_FABRIC 14
#define HPHW_MC 15
#define HPHW_FAULTY 31
/* hardware.c: */
extern const char *parisc_hardware_description(struct parisc_device_id *id);
extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
struct pci_dev;
/* drivers.c: */
extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
struct hardware_path *path);
extern int register_parisc_device(struct parisc_device *dev);
extern int register_parisc_driver(struct parisc_driver *driver);
extern int count_parisc_driver(struct parisc_driver *driver);
extern int unregister_parisc_driver(struct parisc_driver *driver);
extern void walk_central_bus(void);
extern const struct parisc_device *find_pa_parent_type(const struct parisc_device *, int);
extern void print_parisc_devices(void);
extern char *print_pa_hwpath(struct parisc_device *dev, char *path);
extern char *print_pci_hwpath(struct pci_dev *dev, char *path);
extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
extern void init_parisc_bus(void);
extern struct device *hwpath_to_device(struct hardware_path *modpath);
extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
/* inventory.c: */
extern void do_memory_inventory(void);
extern void do_device_inventory(void);
#endif /* _PARISC_HARDWARE_H */

View File

@@ -0,0 +1,8 @@
#ifndef _ASM_HW_IRQ_H
#define _ASM_HW_IRQ_H
/*
* linux/include/asm/hw_irq.h
*/
#endif

View File

@@ -0,0 +1,57 @@
/*
* linux/include/asm-parisc/ide.h
*
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
/*
* This file contains the PARISC architecture specific IDE code.
*/
#ifndef __ASM_PARISC_IDE_H
#define __ASM_PARISC_IDE_H
#ifdef __KERNEL__
/* Generic I/O and MEMIO string operations. */
#define __ide_insw insw
#define __ide_insl insl
#define __ide_outsw outsw
#define __ide_outsl outsl
static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u16 *)addr = __raw_readw(port);
addr += 2;
}
}
static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u32 *)addr = __raw_readl(port);
addr += 4;
}
}
static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
__raw_writew(*(u16 *)addr, port);
addr += 2;
}
}
static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
__raw_writel(*(u32 *)addr, port);
addr += 4;
}
}
#endif /* __KERNEL__ */
#endif /* __ASM_PARISC_IDE_H */

View File

@@ -0,0 +1,293 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
#include <linux/types.h>
#include <asm/pgtable.h>
extern unsigned long parisc_vmerge_boundary;
extern unsigned long parisc_vmerge_max_size;
#define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary
#define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
static inline unsigned long isa_bus_to_virt(unsigned long addr) {
BUG();
return 0;
}
static inline unsigned long isa_virt_to_bus(void *addr) {
BUG();
return 0;
}
/*
* Memory mapped I/O
*
* readX()/writeX() do byteswapping and take an ioremapped address
* __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address.
* gsc_*() don't byteswap and operate on physical addresses;
* eg dev->hpa or 0xfee00000.
*/
static inline unsigned char gsc_readb(unsigned long addr)
{
long flags;
unsigned char ret;
__asm__ __volatile__(
" rsm 2,%0\n"
" ldbx 0(%2),%1\n"
" mtsm %0\n"
: "=&r" (flags), "=r" (ret) : "r" (addr) );
return ret;
}
static inline unsigned short gsc_readw(unsigned long addr)
{
long flags;
unsigned short ret;
__asm__ __volatile__(
" rsm 2,%0\n"
" ldhx 0(%2),%1\n"
" mtsm %0\n"
: "=&r" (flags), "=r" (ret) : "r" (addr) );
return ret;
}
static inline unsigned int gsc_readl(unsigned long addr)
{
u32 ret;
__asm__ __volatile__(
" ldwax 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
return ret;
}
static inline unsigned long long gsc_readq(unsigned long addr)
{
unsigned long long ret;
#ifdef CONFIG_64BIT
__asm__ __volatile__(
" ldda 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
#else
/* two reads may have side effects.. */
ret = ((u64) gsc_readl(addr)) << 32;
ret |= gsc_readl(addr+4);
#endif
return ret;
}
static inline void gsc_writeb(unsigned char val, unsigned long addr)
{
long flags;
__asm__ __volatile__(
" rsm 2,%0\n"
" stbs %1,0(%2)\n"
" mtsm %0\n"
: "=&r" (flags) : "r" (val), "r" (addr) );
}
static inline void gsc_writew(unsigned short val, unsigned long addr)
{
long flags;
__asm__ __volatile__(
" rsm 2,%0\n"
" sths %1,0(%2)\n"
" mtsm %0\n"
: "=&r" (flags) : "r" (val), "r" (addr) );
}
static inline void gsc_writel(unsigned int val, unsigned long addr)
{
__asm__ __volatile__(
" stwas %0,0(%1)\n"
: : "r" (val), "r" (addr) );
}
static inline void gsc_writeq(unsigned long long val, unsigned long addr)
{
#ifdef CONFIG_64BIT
__asm__ __volatile__(
" stda %0,0(%1)\n"
: : "r" (val), "r" (addr) );
#else
/* two writes may have side effects.. */
gsc_writel(val >> 32, addr);
gsc_writel(val, addr+4);
#endif
}
/*
* The standard PCI ioremap interfaces
*/
extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
/* Most machines react poorly to I/O-space being cacheable... Instead let's
* define ioremap() in terms of ioremap_nocache().
*/
static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, _PAGE_NO_CACHE);
}
#define ioremap_nocache(off, sz) ioremap((off), (sz))
extern void iounmap(const volatile void __iomem *addr);
static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
return (*(volatile unsigned char __force *) (addr));
}
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *) addr;
}
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
{
return *(volatile unsigned long long __force *) addr;
}
static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
{
*(volatile unsigned char __force *) addr = b;
}
static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
{
*(volatile unsigned short __force *) addr = b;
}
static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
{
*(volatile unsigned int __force *) addr = b;
}
static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
{
*(volatile unsigned long long __force *) addr = b;
}
/* readb can never be const, so use __fswab instead of le*_to_cpu */
#define readb(addr) __raw_readb(addr)
#define readw(addr) __fswab16(__raw_readw(addr))
#define readl(addr) __fswab32(__raw_readl(addr))
#define readq(addr) __fswab64(__raw_readq(addr))
#define writeb(b, addr) __raw_writeb(b, addr)
#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readq_relaxed(addr) readq(addr)
#define mmiowb() do { } while (0)
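As a rough illustration of the accessor families described above (not part of this header; the helper name and register offsets are invented), a driver would use gsc_readl() directly on a device's physical hpa, but readl() on an ioremap()ed PCI BAR so the little-endian PCI value is converted to CPU order:
#include <asm/io.h>
static u32 example_read_status(unsigned long gsc_hpa, void __iomem *pci_bar)
{
	u32 gsc_status = gsc_readl(gsc_hpa + 0x10);	/* physical address, no swap */
	u32 pci_status = readl(pci_bar + 0x10);		/* ioremap()ed, LE -> CPU order */
	return gsc_status | pci_status;
}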
void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
/* Port-space IO */
#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl
extern unsigned char eisa_in8(unsigned short port);
extern unsigned short eisa_in16(unsigned short port);
extern unsigned int eisa_in32(unsigned short port);
extern void eisa_out8(unsigned char data, unsigned short port);
extern void eisa_out16(unsigned short data, unsigned short port);
extern void eisa_out32(unsigned int data, unsigned short port);
#if defined(CONFIG_PCI)
extern unsigned char inb(int addr);
extern unsigned short inw(int addr);
extern unsigned int inl(int addr);
extern void outb(unsigned char b, int addr);
extern void outw(unsigned short b, int addr);
extern void outl(unsigned int b, int addr);
#elif defined(CONFIG_EISA)
#define inb eisa_in8
#define inw eisa_in16
#define inl eisa_in32
#define outb eisa_out8
#define outw eisa_out16
#define outl eisa_out32
#else
static inline char inb(unsigned long addr)
{
BUG();
return -1;
}
static inline short inw(unsigned long addr)
{
BUG();
return -1;
}
static inline int inl(unsigned long addr)
{
BUG();
return -1;
}
#define outb(x, y) BUG()
#define outw(x, y) BUG()
#define outl(x, y) BUG()
#endif
/*
* String versions of in/out ops:
*/
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
/* IO Port space is : BBiiii where BB is HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff
/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
* bit mode and from 0xfffffffff0000000-0xffffffffffffffff in 64 bit
* mode (essentially just sign extending). This macro takes in a 32
* bit I/O address (still with the leading f) and outputs the correct
* value for either 32 or 64 bit mode */
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
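A quick stand-alone check of the F_EXTEND() arithmetic (illustrative only; it assumes a 64-bit unsigned long and uses an invented sample address):
#include <assert.h>
#include <stdio.h>
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
int main(void)
{
	unsigned long hpa = F_EXTEND(0xf0000000UL);	/* 32-bit form of an I/O address */
	assert(hpa == 0xfffffffff0000000UL);		/* sign-extended 64-bit form */
	printf("0x%lx\n", hpa);
	return 0;
}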
#include <asm-generic/iomap.h>
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif

View File

@@ -0,0 +1,44 @@
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 1999,2003 Matthew Wilcox < willy at debian . org >
* portions from "linux/ioctl.h for Linux" by H.H. Bergman.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PARISC_IOCTL_H
#define _ASM_PARISC_IOCTL_H
/* ioctl command encoding: 32 bits total, command in lower 16 bits,
* size of the parameter structure in the lower 14 bits of the
* upper 16 bits.
* Encoding the size of the parameter structure in the ioctl request
* is useful for catching programs compiled with old versions
* and to avoid overwriting user space outside the user buffer area.
* The highest 2 bits are reserved for indicating the ``access mode''.
* NOTE: This limits the max parameter size to 16kB -1 !
*/
/*
* Direction bits.
*/
#define _IOC_NONE 0U
#define _IOC_WRITE 2U
#define _IOC_READ 1U
#include <asm-generic/ioctl.h>
#endif /* _ASM_PARISC_IOCTL_H */
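The encoding described above can be poked at from user space. The sketch below is illustrative only and assumes a Linux toolchain where <linux/ioctl.h> provides the generic _IO* helpers; the exact field values differ per architecture (note the PA-RISC direction bits above):
#include <stdio.h>
#include <termios.h>
#include <linux/ioctl.h>
int main(void)
{
	/* Same shape as the parisc TCGETS: read direction, type 'T',
	 * number 16, sizeof(struct termios) packed into the upper bits. */
	unsigned int req = _IOR('T', 16, struct termios);
	printf("req=0x%08x dir=%u type='%c' nr=%u size=%u\n", req,
	       _IOC_DIR(req), (char)_IOC_TYPE(req), _IOC_NR(req),
	       (unsigned int)_IOC_SIZE(req));
	return 0;
}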

View File

@@ -0,0 +1,90 @@
#ifndef __ARCH_PARISC_IOCTLS_H__
#define __ARCH_PARISC_IOCTLS_H__
#include <asm/ioctl.h>
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS _IOR('T', 16, struct termios) /* TCGETATTR */
#define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */
#define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */
#define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */
#define TCGETA _IOR('T', 1, struct termio)
#define TCSETA _IOW('T', 2, struct termio)
#define TCSETAW _IOW('T', 3, struct termio)
#define TCSETAF _IOW('T', 4, struct termio)
#define TCSBRK _IO('T', 5)
#define TCXONC _IO('T', 6)
#define TCFLSH _IO('T', 7)
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP _IOR('T', 30, int)
#define TIOCSPGRP _IOW('T', 29, int)
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID _IOR('T', 20, int) /* Return the session ID of FD */
#define TCGETS2 _IOR('T',0x2A, struct termios2)
#define TCSETS2 _IOW('T',0x2B, struct termios2)
#define TCSETSW2 _IOW('T',0x2C, struct termios2)
#define TCSETSF2 _IOW('T',0x2D, struct termios2)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460 /* Get exact space used by quota */
#define TIOCSTART 0x5461
#define TIOCSTOP 0x5462
#define TIOCSLTC 0x5462
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#endif /* _ASM_PARISC_IOCTLS_H */

View File

@@ -0,0 +1,27 @@
#ifndef __PARISC_IPCBUF_H__
#define __PARISC_IPCBUF_H__
/*
* The ipc64_perm structure for PA-RISC is almost identical to
* kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel.
* 'seq' has been changed from long to int so that it's the same size
* on 64-bit kernels as on 32-bit ones.
*/
struct ipc64_perm
{
key_t key;
uid_t uid;
gid_t gid;
uid_t cuid;
gid_t cgid;
unsigned short int __pad1;
mode_t mode;
unsigned short int __pad2;
unsigned short int seq;
unsigned int __pad3;
unsigned long long int __unused1;
unsigned long long int __unused2;
};
#endif /* __PARISC_IPCBUF_H__ */

View File

@@ -0,0 +1,57 @@
/*
* include/asm-parisc/irq.h
*
* Copyright 2005 Matthew Wilcox <matthew@wil.cx>
*/
#ifndef _ASM_PARISC_IRQ_H
#define _ASM_PARISC_IRQ_H
#include <linux/cpumask.h>
#include <asm/types.h>
#define NO_IRQ (-1)
#ifdef CONFIG_GSC
#define GSC_IRQ_BASE 16
#define GSC_IRQ_MAX 63
#define CPU_IRQ_BASE 64
#else
#define CPU_IRQ_BASE 16
#endif
#define TIMER_IRQ (CPU_IRQ_BASE + 0)
#define IPI_IRQ (CPU_IRQ_BASE + 1)
#define CPU_IRQ_MAX (CPU_IRQ_BASE + (BITS_PER_LONG - 1))
#define NR_IRQS (CPU_IRQ_MAX + 1)
static __inline__ int irq_canonicalize(int irq)
{
return (irq == 2) ? 9 : irq;
}
struct irq_chip;
/*
* Some useful "we don't have to do anything here" handlers. Should
* probably be provided by the generic code.
*/
void no_ack_irq(unsigned int irq);
void no_end_irq(unsigned int irq);
void cpu_ack_irq(unsigned int irq);
void cpu_end_irq(unsigned int irq);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
#endif /* _ASM_PARISC_IRQ_H */

View File

@@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>

View File

@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>

View File

@@ -0,0 +1,30 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
#ifdef CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
D(5) KM_BIO_SRC_IRQ,
D(6) KM_BIO_DST_IRQ,
D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_SOFTIRQ0,
D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};
#undef D
#endif

View File

@@ -0,0 +1,42 @@
#ifndef LED_H
#define LED_H
#define LED7 0x80 /* top (or furthest right) LED */
#define LED6 0x40
#define LED5 0x20
#define LED4 0x10
#define LED3 0x08
#define LED2 0x04
#define LED1 0x02
#define LED0 0x01 /* bottom (or furthest left) LED */
#define LED_LAN_TX LED0 /* for LAN transmit activity */
#define LED_LAN_RCV LED1 /* for LAN receive activity */
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */
/* values for pdc_chassis_lcd_info_ret_block.model: */
#define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */
#define DISPLAY_MODEL_NONE 1 /* no LED or LCD */
#define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */
#define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */
#define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */
/* register_led_driver() */
int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
/* registers the LED regions for procfs */
void __init register_led_regions(void);
#ifdef CONFIG_CHASSIS_LCD_LED
/* writes a string to the LCD display (if possible on this h/w) */
int lcd_print(const char *str);
#else
#define lcd_print(str)
#endif
/* main LED initialization function (uses PDC) */
int __init led_init(void);
#endif /* LED_H */

View File

@@ -0,0 +1,31 @@
#ifndef __ASM_PARISC_LINKAGE_H
#define __ASM_PARISC_LINKAGE_H
#ifndef __ALIGN
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
#endif
/*
* In parisc assembly a semicolon marks a comment while an
* exclamation mark is used to separate independent lines.
*/
#ifdef __ASSEMBLY__
#define ENTRY(name) \
.export name !\
ALIGN !\
name:
#ifdef CONFIG_64BIT
#define ENDPROC(name) \
END(name)
#else
#define ENDPROC(name) \
.type name, @function !\
END(name)
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_LINKAGE_H */

View File

@@ -0,0 +1 @@
#include <asm-generic/local.h>

View File

@@ -0,0 +1,16 @@
#ifndef _PARISC_MACHDEP_H
#define _PARISC_MACHDEP_H
#include <linux/notifier.h>
#define MACH_RESTART 1
#define MACH_HALT 2
#define MACH_POWER_ON 3
#define MACH_POWER_OFF 4
extern struct notifier_block *mach_notifier;
extern void pa7300lc_init(void);
extern void (*cpu_lpmc)(int, struct pt_regs *);
#endif

View File

@@ -0,0 +1,9 @@
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
/* empty include file to satisfy the include in genrtc.c */
#endif /* _ASM_MC146818RTC_H */

View File

@@ -0,0 +1,9 @@
#ifndef ASM_PARISC_MCKINLEY_H
#define ASM_PARISC_MCKINLEY_H
#ifdef __KERNEL__
/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_mckinley_root;
#endif /*__KERNEL__*/
#endif /*ASM_PARISC_MCKINLEY_H*/

View File

@@ -0,0 +1,61 @@
#ifndef __PARISC_MMAN_H__
#define __PARISC_MMAN_H__
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_TYPE 0x03 /* Mask for type of mapping */
#define MAP_FIXED 0x04 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x10 /* don't use a file */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_GROWSDOWN 0x8000 /* stack-like segment */
#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
#define MS_SYNC 1 /* synchronous memory sync */
#define MS_ASYNC 2 /* sync memory asynchronously */
#define MS_INVALIDATE 4 /* invalidate the caches */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
#define MADV_SPACEAVAIL 5 /* ensure that resources are reserved */
#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
#define MADV_VPS_INHERIT 7 /* Inherit parent's page size */
/* common/generic parameters */
#define MADV_REMOVE 9 /* remove these pages & resources */
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
/* The range 12-64 is reserved for page size specification. */
#define MADV_4K_PAGES 12 /* Use 4K pages */
#define MADV_16K_PAGES 14 /* Use 16K pages */
#define MADV_64K_PAGES 16 /* Use 64K pages */
#define MADV_256K_PAGES 18 /* Use 256K pages */
#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
/* compatibility flags */
#define MAP_FILE 0
#define MAP_VARIABLE 0
#endif /* __PARISC_MMAN_H__ */

View File

@@ -0,0 +1,7 @@
#ifndef _PARISC_MMU_H_
#define _PARISC_MMU_H_
/* On parisc, we store the space id here */
typedef unsigned long mm_context_t;
#endif /* _PARISC_MMU_H_ */

View File

@@ -0,0 +1,75 @@
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* on PA-RISC, we actually have enough contexts to justify an allocator
* for them. prumpf */
extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
BUG_ON(atomic_read(&mm->mm_users) != 1);
mm->context = alloc_sid();
return 0;
}
static inline void
destroy_context(struct mm_struct *mm)
{
free_sid(mm->context);
mm->context = 0;
}
static inline void load_context(mm_context_t context)
{
mtsp(context, 3);
#if SPACEID_SHIFT == 0
mtctl(context << 1,8);
#else
mtctl(context >> (SPACEID_SHIFT - 1),8);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/*
* Activate_mm is our one chance to allocate a space id
* for a new mm created in the exec path. There's also
* some lazy tlb stuff, which is currently dead code, but
* we only allocate a space id if one hasn't been allocated
* already, so we should be OK.
*/
BUG_ON(next == &init_mm); /* Should never happen */
if (next->context == 0)
next->context = alloc_sid();
switch_mm(prev,next,current);
}
#endif

View File

@@ -0,0 +1,73 @@
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
#ifdef CONFIG_DISCONTIGMEM
#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
extern int npmem_ranges;
struct node_map_data {
pg_data_t pg_data;
};
extern struct node_map_data node_data[];
#define NODE_DATA(nid) (&node_data[nid].pg_data)
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) \
({ \
pg_data_t *__pgdat = NODE_DATA(nid); \
__pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
})
/* We have these possible memory map layouts:
* Astro: 0-3.75, 67.75-68, 4-64
* zx1: 0-1, 257-260, 4-256
* Stretch (N-class): 0-2, 4-32, 34-xxx
*/
/* Since each 1GB can only belong to one region (node), we can create
* an index table for pfn to nid lookup; each entry in pfnnid_map
* represents 1GB, and contains the node that the memory belongs to. */
#define PFNNID_SHIFT (30 - PAGE_SHIFT)
#define PFNNID_MAP_MAX 512 /* support 512GB */
extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
#ifndef CONFIG_64BIT
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
#else
/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
#endif
static inline int pfn_to_nid(unsigned long pfn)
{
unsigned int i;
unsigned char r;
if (unlikely(pfn_is_io(pfn)))
return 0;
i = pfn >> PFNNID_SHIFT;
BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
r = pfnnid_map[i];
BUG_ON(r == 0xff);
return (int)r;
}
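A stand-alone sanity check of the one-entry-per-GB index math used by pfn_to_nid() (illustrative only; it assumes 4kB pages, i.e. PAGE_SHIFT == 12 and therefore PFNNID_SHIFT == 18):
#include <assert.h>
#define DEMO_PAGE_SHIFT 12
#define DEMO_PFNNID_SHIFT (30 - DEMO_PAGE_SHIFT)
int main(void)
{
	unsigned long pfn_lo = 0x00000000UL >> DEMO_PAGE_SHIFT;	/* physical 0 */
	unsigned long pfn_1g = 0x40000000UL >> DEMO_PAGE_SHIFT;	/* physical 1GB */
	assert((pfn_lo >> DEMO_PFNNID_SHIFT) == 0);	/* first pfnnid_map slot */
	assert((pfn_1g >> DEMO_PFNNID_SHIFT) == 1);	/* second slot */
	return 0;
}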
static inline int pfn_valid(int pfn)
{
int nid = pfn_to_nid(pfn);
if (nid >= 0)
return (pfn < node_end_pfn(nid));
return 0;
}
#else /* !CONFIG_DISCONTIGMEM */
#define MAX_PHYSMEM_RANGES 1
#endif
#endif /* _PARISC_MMZONE_H */

View File

@@ -0,0 +1,32 @@
#ifndef _ASM_PARISC_MODULE_H
#define _ASM_PARISC_MODULE_H
/*
* This file contains the parisc architecture specific module code.
*/
#ifdef CONFIG_64BIT
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Addr Elf64_Addr
#define Elf_Rela Elf64_Rela
#else
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Addr Elf32_Addr
#define Elf_Rela Elf32_Rela
#endif
struct unwind_table;
struct mod_arch_specific
{
unsigned long got_offset, got_count, got_max;
unsigned long fdesc_offset, fdesc_count, fdesc_max;
unsigned long stub_offset, stub_count, stub_max;
unsigned long init_stub_offset, init_stub_count, init_stub_max;
int unwind_section;
struct unwind_table *unwind;
};
#endif /* _ASM_PARISC_MODULE_H */

View File

@@ -0,0 +1,37 @@
#ifndef _PARISC_MSGBUF_H
#define _PARISC_MSGBUF_H
/*
* The msqid64_ds structure for parisc architecture, copied from sparc.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifndef CONFIG_64BIT
unsigned int __pad1;
#endif
__kernel_time_t msg_stime; /* last msgsnd time */
#ifndef CONFIG_64BIT
unsigned int __pad2;
#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
#ifndef CONFIG_64BIT
unsigned int __pad3;
#endif
__kernel_time_t msg_ctime; /* last change time */
unsigned int msg_cbytes; /* current number of bytes on queue */
unsigned int msg_qnum; /* number of messages in queue */
unsigned int msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned int __unused1;
unsigned int __unused2;
};
#endif /* _PARISC_MSGBUF_H */

View File

@@ -0,0 +1,9 @@
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#include <asm-generic/mutex-dec.h>

View File

@@ -0,0 +1,173 @@
#ifndef _PARISC_PAGE_H
#define _PARISC_PAGE_H
#include <linux/const.h>
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define PAGE_SHIFT 14
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define PAGE_SHIFT 16
#else
# error "unknown default kernel page size"
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifndef __ASSEMBLY__
#include <asm/types.h>
#include <asm/cache.h>
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from))
struct page;
void copy_user_page_asm(void *to, void *from);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
/*
* These are used to make use of C type-checking..
*/
#define STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pte;
#if !defined(CONFIG_64BIT)
unsigned long future_flags;
/* XXX: it's possible to remove future_flags and change BITS_PER_PTE_ENTRY
to 2, but then strangely the identical 32bit kernel boots on a
c3000(pa20), but not any longer on a 715(pa11).
Still investigating... HelgeD.
*/
#endif
} pte_t; /* either 32 or 64bit */
/* NOTE: even on 64 bits, these entries are __u32 because we allocate
* the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
typedef struct { __u32 pmd; } pmd_t;
typedef struct { __u32 pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
/* These do not work as lvalues, so make sure we don't use them as such. */
#define pmd_val(x) ((x).pmd + 0)
#define pgd_val(x) ((x).pgd + 0)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pmd_val_set(x,n) (x).pmd = (n)
#define __pgd_val_set(x,n) (x).pgd = (n)
#else
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
typedef __u32 pmd_t;
typedef __u32 pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
#define __pmd_val_set(x,n) (x) = (n)
#define __pgd_val_set(x,n) (x) = (n)
#endif /* STRICT_MM_TYPECHECKS */
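A minimal stand-alone illustration of what STRICT_MM_TYPECHECKS buys (the demo_ names are invented and the 32-bit future_flags padding is omitted): wrapping the value in a struct means a raw integer cannot be passed where a pte_t is expected.
#include <assert.h>
typedef struct { unsigned long pte; } demo_pte_t;
#define demo_pte_val(x) ((x).pte)
#define demo_mk_pte(x) ((demo_pte_t) { (x) })
static unsigned long demo_read(demo_pte_t p)	/* only accepts wrapped values */
{
	return demo_pte_val(p);
}
int main(void)
{
	demo_pte_t p = demo_mk_pte(0x1234UL);
	assert(demo_read(p) == 0x1234UL);
	/* demo_read(0x1234UL); would fail to compile -- that is the point. */
	return 0;
}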
typedef struct page *pgtable_t;
typedef struct __physmem_range {
unsigned long start_pfn;
unsigned long pages; /* PAGE_SIZE pages */
} physmem_range_t;
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
#endif /* !__ASSEMBLY__ */
/* WARNING: The definitions below must match exactly to sizeof(pte_t)
* etc
*/
#ifdef CONFIG_64BIT
#define BITS_PER_PTE_ENTRY 3
#define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY 2
#else
#define BITS_PER_PTE_ENTRY 3
#define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY
#endif
#define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY)
#define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY)
#define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY)
#define LINUX_GATEWAY_SPACE 0
/* This governs the relationship between virtual and physical addresses.
* If you alter it, make sure to take care of our various fixed mapping
* segments in fixmap.h */
#ifdef CONFIG_64BIT
#define __PAGE_OFFSET (0x40000000) /* 1GB */
#else
#define __PAGE_OFFSET (0x10000000) /* 256MB */
#endif
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
/* The size of the gateway page (we leave lots of room for expansion) */
#define GATEWAY_PAGE_SIZE 0x4000
/* The start of the actual kernel binary---used in vmlinux.lds.S
* Leave some space after __PAGE_OFFSET for detecting kernel null
* ptr derefs */
#define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000)
/* These macros don't work for 64-bit C code -- don't allow in C at all */
#ifdef __ASSEMBLY__
# define PA(x) ((x)-__PAGE_OFFSET)
# define VA(x) ((x)+__PAGE_OFFSET)
#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
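A stand-alone check of the __pa()/__va() offset arithmetic (illustrative only; it uses the 32-bit __PAGE_OFFSET of 0x10000000 and a made-up kernel virtual address):
#include <assert.h>
#define DEMO_PAGE_OFFSET 0x10000000UL
#define demo_pa(x) ((unsigned long)(x) - DEMO_PAGE_OFFSET)
#define demo_va(x) ((void *)((unsigned long)(x) + DEMO_PAGE_OFFSET))
int main(void)
{
	unsigned long kva = 0x10400000UL;	/* hypothetical kernel virtual address */
	assert(demo_pa(kva) == 0x00400000UL);
	assert((unsigned long)demo_va(0x00400000UL) == kva);
	return 0;
}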
#ifndef CONFIG_DISCONTIGMEM
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_DISCONTIGMEM */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#endif
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
#endif /* _PARISC_PAGE_H */

View File

@@ -0,0 +1,22 @@
#ifndef _ASMPARISC_PARAM_H
#define _ASMPARISC_PARAM_H
#ifdef __KERNEL__
#define HZ CONFIG_HZ
#define USER_HZ 100 /* some user APIs use "ticks" */
#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
#ifndef HZ
#define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif

View File

@@ -0,0 +1,64 @@
#ifndef _ASM_PARISC_PARISC_DEVICE_H_
#define _ASM_PARISC_PARISC_DEVICE_H_
#include <linux/device.h>
struct parisc_device {
struct resource hpa; /* Hard Physical Address */
struct parisc_device_id id;
struct parisc_driver *driver; /* Driver for this device */
char name[80]; /* The hardware description */
int irq;
int aux_irq; /* Some devices have a second IRQ */
char hw_path; /* The module number on this bus */
unsigned int num_addrs; /* some devices have additional address ranges. */
unsigned long *addr; /* which will be stored here */
#ifdef CONFIG_64BIT
/* parms for pdc_pat_cell_module() call */
unsigned long pcell_loc; /* Physical Cell location */
unsigned long mod_index; /* PAT specific - Misc Module info */
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
#endif
u64 dma_mask; /* DMA mask for I/O */
struct device dev;
};
struct parisc_driver {
struct parisc_driver *next;
char *name;
const struct parisc_device_id *id_table;
int (*probe) (struct parisc_device *dev); /* New device discovered */
int (*remove) (struct parisc_device *dev);
struct device_driver drv;
};
#define to_parisc_device(d) container_of(d, struct parisc_device, dev)
#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv)
#define parisc_parent(d) to_parisc_device(d->dev.parent)
static inline char *parisc_pathname(struct parisc_device *d)
{
return d->dev.bus_id;
}
static inline void
parisc_set_drvdata(struct parisc_device *d, void *p)
{
dev_set_drvdata(&d->dev, p);
}
static inline void *
parisc_get_drvdata(struct parisc_device *d)
{
return dev_get_drvdata(&d->dev);
}
extern struct bus_type parisc_bus_type;
#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/

View File

@@ -0,0 +1,18 @@
/*
*
* parport.h: ia32-compatible parport initialisation
*
* This file should only be included by drivers/parport/parport_pc.c.
*/
#ifndef _ASM_PARPORT_H
#define _ASM_PARPORT_H 1
static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
/* nothing ! */
return 0;
}
#endif /* !(_ASM_PARPORT_H) */

View File

@@ -0,0 +1,294 @@
#ifndef __ASM_PARISC_PCI_H
#define __ASM_PARISC_PCI_H
#include <asm/scatterlist.h>
/*
** HP PCI platforms generally support multiple bus adapters.
** (workstations 1-~4, servers 2-~32)
**
** Newer platforms number the busses across PCI bus adapters *sparsely*.
** E.g. 0, 8, 16, ...
**
** Under a PCI bus, most HP platforms support PPBs up to two or three
** levels deep. See "Bit3" product line.
*/
#define PCI_MAX_BUSSES 256
/* To be used as: mdelay(pci_post_reset_delay);
*
* post_reset is the time the kernel should stall to prevent anyone from
* accessing the PCI bus once #RESET is de-asserted.
* PCI spec somewhere says 1 second but with multi-PCI bus systems,
* this makes the boot time much longer than necessary.
* 20ms seems to work for all the HP PCI implementations to date.
*/
#define pci_post_reset_delay 50
/*
** pci_hba_data (aka H2P_OBJECT in HP/UX)
**
** This is the "common" or "base" data structure which HBA drivers
** (eg Dino or LBA) are required to place at the top of their own
** platform_data structure. I've heard this called "C inheritance" too.
**
** Data needed by pcibios layer belongs here.
*/
struct pci_hba_data {
void __iomem *base_addr; /* aka Host Physical Address */
const struct parisc_device *dev; /* device from PA bus walk */
struct pci_bus *hba_bus; /* primary PCI bus below HBA */
int hba_num; /* I/O port space access "key" */
struct resource bus_num; /* PCI bus numbers */
struct resource io_space; /* PIOP */
struct resource lmmio_space; /* bus addresses < 4Gb */
struct resource elmmio_space; /* additional bus addresses < 4Gb */
struct resource gmmio_space; /* bus addresses > 4Gb */
/* NOTE: Dino code assumes it can use *all* of the lmmio_space,
* elmmio_space and gmmio_space as a contiguous array of
* resources. This #define represents the array size */
#define DINO_MAX_LMMIO_RESOURCES 3
unsigned long lmmio_space_offset; /* CPU view - PCI view */
void * iommu; /* IOMMU this device is under */
/* REVISIT - spinlock to protect resources? */
#define HBA_NAME_SIZE 16
char io_name[HBA_NAME_SIZE];
char lmmio_name[HBA_NAME_SIZE];
char elmmio_name[HBA_NAME_SIZE];
char gmmio_name[HBA_NAME_SIZE];
};
#define HBA_DATA(d) ((struct pci_hba_data *) (d))
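A hypothetical sketch (names invented) of the "C inheritance" pattern above: because pci_hba_data is the first member, the generic code's opaque platform_data pointer can be cast back with HBA_DATA() and still reach the common fields.
struct example_hba {
	struct pci_hba_data hba;	/* must remain the first member */
	int example_private_flag;	/* driver-specific state follows */
};
static int example_port_key(void *platform_data)
{
	return HBA_DATA(platform_data)->hba_num;
}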
/*
** We support 2^16 I/O ports per HBA. These are set up in the form
** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
** space address.
*/
#define HBA_PORT_SPACE_BITS 16
#define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS)
#define HBA_PORT_SPACE_SIZE (1UL << HBA_PORT_SPACE_BITS)
#define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
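A worked example of the port-number packing (stand-alone and illustrative; the port value is made up): HBA 2, local port 0x10 becomes global port 0x20010.
#include <assert.h>
#define HBA_PORT_SPACE_BITS 16
#define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS)
#define HBA_PORT_SPACE_SIZE (1UL << HBA_PORT_SPACE_BITS)
#define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
int main(void)
{
	unsigned long port = HBA_PORT_BASE(2) + 0x10;
	assert(port == 0x20010);
	assert(PCI_PORT_HBA(port) == 2);
	assert(PCI_PORT_ADDR(port) == 0x10);
	return 0;
}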
#ifdef CONFIG_64BIT
#define PCI_F_EXTEND 0xffffffff00000000UL
#define PCI_IS_LMMIO(hba,a) pci_is_lmmio(hba,a)
/* We need to know if an address is LMMIO or GMMIO.
* LMMIO requires mangling and GMMIO we must use as-is.
*/
static __inline__ int pci_is_lmmio(struct pci_hba_data *hba, unsigned long a)
{
return(((a) & PCI_F_EXTEND) == PCI_F_EXTEND);
}
/*
** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
** See pci.c for more conversions used by Generic PCI code.
**
** Platform characteristics/firmware guarantee that
** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO
** (2) PA_VIEW == IO_VIEW for GMMIO
*/
#define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \
? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \
: (a)) /* GMMIO */
#define PCI_HOST_ADDR(hba,a) (((a) & PCI_F_EXTEND) == 0 \
? (a) + hba->lmmio_space_offset \
: (a))
#else /* !CONFIG_64BIT */
#define PCI_BUS_ADDR(hba,a) (a)
#define PCI_HOST_ADDR(hba,a) (a)
#define PCI_F_EXTEND 0UL
#define PCI_IS_LMMIO(hba,a) (1) /* 32-bit doesn't support GMMIO */
#endif /* !CONFIG_64BIT */
/*
** KLUGE: linux/pci.h includes asm/pci.h BEFORE declaring struct pci_bus
** (This eliminates some of the warnings).
*/
struct pci_bus;
struct pci_dev;
/*
* If the PCI device's view of memory is the same as the CPU's view of memory,
* PCI_DMA_BUS_IS_PHYS is true. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#ifdef CONFIG_PA20
/* All PA-2.0 machines have an IOMMU. */
#define PCI_DMA_BUS_IS_PHYS 0
#define parisc_has_iommu() do { } while (0)
#else
#if defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)
extern int parisc_bus_is_phys; /* in arch/parisc/kernel/setup.c */
#define PCI_DMA_BUS_IS_PHYS parisc_bus_is_phys
#define parisc_has_iommu() do { parisc_bus_is_phys = 0; } while (0)
#else
#define PCI_DMA_BUS_IS_PHYS 1
#define parisc_has_iommu() do { } while (0)
#endif
#endif /* !CONFIG_PA20 */
/*
** Most PCI devices (eg Tulip, NCR720) also export the same registers
** to both MMIO and I/O port space. Due to poor performance of I/O Port
** access under HP PCI bus adapters, strongly recommend the use of MMIO
** address space.
**
** While I'm at it more PA programming notes:
**
** 1) MMIO stores (writes) are posted operations. This means the processor
** gets an "ACK" before the write actually gets to the device. A read
** to the same device (or typically the bus adapter above it) will
** force in-flight write transaction(s) out to the targeted device
** before the read can complete.
**
** 2) The Programmed I/O (PIO) data may not always be strongly ordered with
** respect to DMA on all platforms. Ie PIO data can reach the processor
** before in-flight DMA reaches memory. Since most SMP PA platforms
** are I/O coherent, it generally doesn't matter...but sometimes
** it does.
**
** I've helped device driver writers debug both types of problems.
*/
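Illustrative only (register offset and helper name invented): the usual response to note (1) above is to read back from the same device after a critical MMIO write, which forces any posted writes out before the code continues.
#include <asm/io.h>
static void example_start_dma(void __iomem *regs)
{
	writel(1, regs + 0x08);		/* hypothetical "go" register */
	(void) readl(regs + 0x08);	/* read-back flushes the posted write */
}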
struct pci_port_ops {
u8 (*inb) (struct pci_hba_data *hba, u16 port);
u16 (*inw) (struct pci_hba_data *hba, u16 port);
u32 (*inl) (struct pci_hba_data *hba, u16 port);
void (*outb) (struct pci_hba_data *hba, u16 port, u8 data);
void (*outw) (struct pci_hba_data *hba, u16 port, u16 data);
void (*outl) (struct pci_hba_data *hba, u16 port, u32 data);
};
struct pci_bios_ops {
void (*init)(void);
void (*fixup_bus)(struct pci_bus *bus);
};
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) \
((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) \
((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
/*
** Stuff declared in arch/parisc/kernel/pci.c
*/
extern struct pci_port_ops *pci_port;
extern struct pci_bios_ops *pci_bios;
#ifdef CONFIG_PCI
extern void pcibios_register_hba(struct pci_hba_data *);
extern void pcibios_set_master(struct pci_dev *);
#else
static inline void pcibios_register_hba(struct pci_hba_data *x)
{
}
#endif
/*
* pcibios_assign_all_busses() is used in drivers/pci/pci.c:pci_do_scan_bus()
* 0 == check if bridge is numbered before re-numbering.
* 1 == pci_do_scan_bus() should automatically number all PCI-PCI bridges.
*
* We *should* set this to zero for "legacy" platforms and one
* for PAT platforms.
*
* But legacy platforms also need to renumber the busses below a Host
* Bus controller. Adding a 4-port Tulip card on the first PCI root
* bus of a C200 resulted in the secondary bus being numbered as 1.
* The second PCI host bus controller's root bus had already been
* assigned bus number 1 by firmware and sysfs complained.
*
* Firmware isn't doing anything wrong here since each controller
* is its own PCI domain. It's simpler and easier for us to renumber
* the busses rather than treat each Dino as a separate PCI domain.
* Eventually, we may want to introduce PCI domains for Superdome or
* rp7420/8420 boxes and then revisit this issue.
*/
#define pcibios_assign_all_busses() (1)
#define pcibios_scan_all_fns(a, b) (0)
#define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
/* export the pci_ DMA API in terms of the dma_ one */
#include <asm-generic/pci-dma-compat.h>
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
unsigned long *strategy_parameter)
{
unsigned long cacheline_size;
u8 byte;
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
if (byte == 0)
cacheline_size = 1024;
else
cacheline_size = (int) byte * 4;
*strat = PCI_DMA_BURST_MULTIPLE;
*strategy_parameter = cacheline_size;
}
#endif
extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res);
extern void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
struct pci_bus_region *region);
static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
struct resource *root = NULL;
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
if (res->flags & IORESOURCE_MEM)
root = &iomem_resource;
return root;
}
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
/* We don't need to penalize isa irq's */
}
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#endif /* __ASM_PARISC_PCI_H */

View File

@@ -0,0 +1,762 @@
#ifndef _PARISC_PDC_H
#define _PARISC_PDC_H
/*
* PDC return values ...
* All PDC calls return a subset of these errors.
*/
#define PDC_WARN 3 /* Call completed with a warning */
#define PDC_REQ_ERR_1 2 /* See above */
#define PDC_REQ_ERR_0 1 /* Call would generate a requestor error */
#define PDC_OK 0 /* Call completed successfully */
#define PDC_BAD_PROC -1 /* Called non-existent procedure*/
#define PDC_BAD_OPTION -2 /* Called with non-existent option */
#define PDC_ERROR -3 /* Call could not complete without an error */
#define PDC_NE_MOD -5 /* Module not found */
#define PDC_NE_CELL_MOD -7 /* Cell module not found */
#define PDC_INVALID_ARG -10 /* Called with an invalid argument */
#define PDC_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */
#define PDC_NOT_NARROW -17 /* Narrow mode not supported */
/*
* PDC entry points...
*/
#define PDC_POW_FAIL 1 /* perform a power-fail */
#define PDC_POW_FAIL_PREPARE 0 /* prepare for powerfail */
#define PDC_CHASSIS 2 /* PDC-chassis functions */
#define PDC_CHASSIS_DISP 0 /* update chassis display */
#define PDC_CHASSIS_WARN 1 /* return chassis warnings */
#define PDC_CHASSIS_DISPWARN 2 /* update&return chassis status */
#define PDC_RETURN_CHASSIS_INFO 128 /* HVERSION dependent: return chassis LED/LCD info */
#define PDC_PIM 3 /* Get PIM data */
#define PDC_PIM_HPMC 0 /* Transfer HPMC data */
#define PDC_PIM_RETURN_SIZE 1 /* Get Max buffer needed for PIM*/
#define PDC_PIM_LPMC 2 /* Transfer LPMC data */
#define PDC_PIM_SOFT_BOOT 3 /* Transfer Soft Boot data */
#define PDC_PIM_TOC 4 /* Transfer TOC data */
#define PDC_MODEL 4 /* PDC model information call */
#define PDC_MODEL_INFO 0 /* returns information */
#define PDC_MODEL_BOOTID 1 /* set the BOOT_ID */
#define PDC_MODEL_VERSIONS 2 /* returns cpu-internal versions*/
#define PDC_MODEL_SYSMODEL 3 /* return system model info */
#define PDC_MODEL_ENSPEC 4 /* enable specific option */
#define PDC_MODEL_DISPEC 5 /* disable specific option */
#define PDC_MODEL_CPU_ID 6 /* returns cpu-id (only newer machines!) */
#define PDC_MODEL_CAPABILITIES 7 /* returns OS32/OS64-flags */
/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
#define PDC_MODEL_IOPDIR_FDC (1 << 2)
#define PDC_MODEL_NVA_MASK (3 << 4)
#define PDC_MODEL_NVA_SUPPORTED (0 << 4)
#define PDC_MODEL_NVA_SLOW (1 << 4)
#define PDC_MODEL_NVA_UNSUPPORTED (3 << 4)
#define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */
#define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */
#define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */
#define PA90_INSTRUCTION_SET 0x8
#define PDC_CACHE 5 /* return/set cache (& TLB) info*/
#define PDC_CACHE_INFO 0 /* returns information */
#define PDC_CACHE_SET_COH 1 /* set coherence state */
#define PDC_CACHE_RET_SPID 2 /* returns space-ID bits */
#define PDC_HPA 6 /* return HPA of processor */
#define PDC_HPA_PROCESSOR 0
#define PDC_HPA_MODULES 1
#define PDC_COPROC 7 /* Co-Processor (usually FP unit(s)) */
#define PDC_COPROC_CFG 0 /* Co-Processor Cfg (FP unit(s) enabled?) */
#define PDC_IODC 8 /* talk to IODC */
#define PDC_IODC_READ 0 /* read IODC entry point */
/* PDC_IODC_RI_ * INDEX parameter of PDC_IODC_READ */
#define PDC_IODC_RI_DATA_BYTES 0 /* IODC Data Bytes */
/* 1, 2 obsolete - HVERSION dependent*/
#define PDC_IODC_RI_INIT 3 /* Initialize module */
#define PDC_IODC_RI_IO 4 /* Module input/output */
#define PDC_IODC_RI_SPA 5 /* Module input/output */
#define PDC_IODC_RI_CONFIG 6 /* Module input/output */
/* 7 obsolete - HVERSION dependent */
#define PDC_IODC_RI_TEST 8 /* Module input/output */
#define PDC_IODC_RI_TLB 9 /* Module input/output */
#define PDC_IODC_NINIT 2 /* non-destructive init */
#define PDC_IODC_DINIT 3 /* destructive init */
#define PDC_IODC_MEMERR 4 /* check for memory errors */
#define PDC_IODC_INDEX_DATA 0 /* get first 16 bytes from mod IODC */
#define PDC_IODC_BUS_ERROR -4 /* bus error return value */
#define PDC_IODC_INVALID_INDEX -5 /* invalid index return value */
#define PDC_IODC_COUNT -6 /* count is too small */
#define PDC_TOD 9 /* time-of-day clock (TOD) */
#define PDC_TOD_READ 0 /* read TOD */
#define PDC_TOD_WRITE 1 /* write TOD */
#define PDC_STABLE 10 /* stable storage (sprockets) */
#define PDC_STABLE_READ 0
#define PDC_STABLE_WRITE 1
#define PDC_STABLE_RETURN_SIZE 2
#define PDC_STABLE_VERIFY_CONTENTS 3
#define PDC_STABLE_INITIALIZE 4
#define PDC_NVOLATILE 11 /* often not implemented */
#define PDC_ADD_VALID 12 /* Memory validation PDC call */
#define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */
#define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */
#define PDC_PROC 16 /* (sprockets) */
#define PDC_CONFIG 16 /* (sprockets) */
#define PDC_CONFIG_DECONFIG 0
#define PDC_CONFIG_DRECONFIG 1
#define PDC_CONFIG_DRETURN_CONFIG 2
#define PDC_BLOCK_TLB 18 /* manage hardware block-TLB */
#define PDC_BTLB_INFO 0 /* returns parameter */
#define PDC_BTLB_INSERT 1 /* insert BTLB entry */
#define PDC_BTLB_PURGE 2 /* purge BTLB entries */
#define PDC_BTLB_PURGE_ALL 3 /* purge all BTLB entries */
#define PDC_TLB 19 /* manage hardware TLB miss handling */
#define PDC_TLB_INFO 0 /* returns parameter */
#define PDC_TLB_SETUP 1 /* set up miss handling */
#define PDC_MEM 20 /* Manage memory */
#define PDC_MEM_MEMINFO 0
#define PDC_MEM_ADD_PAGE 1
#define PDC_MEM_CLEAR_PDT 2
#define PDC_MEM_READ_PDT 3
#define PDC_MEM_RESET_CLEAR 4
#define PDC_MEM_GOODMEM 5
#define PDC_MEM_TABLE 128 /* Non contig mem map (sprockets) */
#define PDC_MEM_RETURN_ADDRESS_TABLE PDC_MEM_TABLE
#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE 131
#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES 132
#define PDC_MEM_GET_PHYSICAL_LOCATION_FROM_MEMORY_ADDRESS 133
#define PDC_MEM_RET_SBE_REPLACED 5 /* PDC_MEM return values */
#define PDC_MEM_RET_DUPLICATE_ENTRY 4
#define PDC_MEM_RET_BUF_SIZE_SMALL 1
#define PDC_MEM_RET_PDT_FULL -11
#define PDC_MEM_RET_INVALID_PHYSICAL_LOCATION ~0ULL
#define PDC_PSW 21 /* Get/Set default System Mask */
#define PDC_PSW_MASK 0 /* Return mask */
#define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */
#define PDC_PSW_SET_DEFAULTS 2 /* Set default */
#define PDC_PSW_ENDIAN_BIT 1 /* set for big endian */
#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */
#define PDC_SYSTEM_MAP 22 /* find system modules */
#define PDC_FIND_MODULE 0
#define PDC_FIND_ADDRESS 1
#define PDC_TRANSLATE_PATH 2
#define PDC_SOFT_POWER 23 /* soft power switch */
#define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */
#define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */
/* HVERSION dependent */
/* The PDC_MEM_MAP calls */
#define PDC_MEM_MAP 128 /* on s700: return page info */
#define PDC_MEM_MAP_HPA 0 /* returns hpa of a module */
#define PDC_EEPROM 129 /* EEPROM access */
#define PDC_EEPROM_READ_WORD 0
#define PDC_EEPROM_WRITE_WORD 1
#define PDC_EEPROM_READ_BYTE 2
#define PDC_EEPROM_WRITE_BYTE 3
#define PDC_EEPROM_EEPROM_PASSWORD -1000
#define PDC_NVM 130 /* NVM (non-volatile memory) access */
#define PDC_NVM_READ_WORD 0
#define PDC_NVM_WRITE_WORD 1
#define PDC_NVM_READ_BYTE 2
#define PDC_NVM_WRITE_BYTE 3
#define PDC_SEED_ERROR 132 /* (sprockets) */
#define PDC_IO 135 /* log error info, reset IO system */
#define PDC_IO_READ_AND_CLEAR_ERRORS 0
#define PDC_IO_RESET 1
#define PDC_IO_RESET_DEVICES 2
/* sets bits 6&7 (little endian) of the HcControl Register */
#define PDC_IO_USB_SUSPEND 0xC000000000000000
#define PDC_IO_EEPROM_IO_ERR_TABLE_FULL -5 /* return value */
#define PDC_IO_NO_SUSPEND -6 /* return value */
#define PDC_BROADCAST_RESET 136 /* reset all processors */
#define PDC_DO_RESET 0 /* option: perform a broadcast reset */
#define PDC_DO_FIRM_TEST_RESET 1 /* Do broadcast reset with bitmap */
#define PDC_BR_RECONFIGURATION 2 /* reset w/reconfiguration */
#define PDC_FIRM_TEST_MAGIC 0xab9ec36fUL /* for this reboot only */
#define PDC_LAN_STATION_ID 138 /* Hversion dependent mechanism for */
#define PDC_LAN_STATION_ID_READ 0 /* getting the lan station address */
#define PDC_LAN_STATION_ID_SIZE 6
#define PDC_CHECK_RANGES 139 /* (sprockets) */
#define PDC_NV_SECTIONS 141 /* (sprockets) */
#define PDC_PERFORMANCE 142 /* performance monitoring */
#define PDC_SYSTEM_INFO 143 /* system information */
#define PDC_SYSINFO_RETURN_INFO_SIZE 0
#define PDC_SYSINFO_RRETURN_SYS_INFO 1
#define PDC_SYSINFO_RRETURN_ERRORS 2
#define PDC_SYSINFO_RRETURN_WARNINGS 3
#define PDC_SYSINFO_RETURN_REVISIONS 4
#define PDC_SYSINFO_RRETURN_DIAGNOSE 5
#define PDC_SYSINFO_RRETURN_HV_DIAGNOSE 1005
#define PDC_RDR 144 /* (sprockets) */
#define PDC_RDR_READ_BUFFER 0
#define PDC_RDR_READ_SINGLE 1
#define PDC_RDR_WRITE_SINGLE 2
#define PDC_INTRIGUE 145 /* (sprockets) */
#define PDC_INTRIGUE_WRITE_BUFFER 0
#define PDC_INTRIGUE_GET_SCRATCH_BUFSIZE 1
#define PDC_INTRIGUE_START_CPU_COUNTERS 2
#define PDC_INTRIGUE_STOP_CPU_COUNTERS 3
#define PDC_STI 146 /* STI access */
/* same as PDC_PCI_XXX values (see below) */
/* Legacy PDC definitions for same stuff */
#define PDC_PCI_INDEX 147
#define PDC_PCI_INTERFACE_INFO 0
#define PDC_PCI_SLOT_INFO 1
#define PDC_PCI_INFLIGHT_BYTES 2
#define PDC_PCI_READ_CONFIG 3
#define PDC_PCI_WRITE_CONFIG 4
#define PDC_PCI_READ_PCI_IO 5
#define PDC_PCI_WRITE_PCI_IO 6
#define PDC_PCI_READ_CONFIG_DELAY 7
#define PDC_PCI_UPDATE_CONFIG_DELAY 8
#define PDC_PCI_PCI_PATH_TO_PCI_HPA 9
#define PDC_PCI_PCI_HPA_TO_PCI_PATH 10
#define PDC_PCI_PCI_PATH_TO_PCI_BUS 11
#define PDC_PCI_PCI_RESERVED 12
#define PDC_PCI_PCI_INT_ROUTE_SIZE 13
#define PDC_PCI_GET_INT_TBL_SIZE PDC_PCI_PCI_INT_ROUTE_SIZE
#define PDC_PCI_PCI_INT_ROUTE 14
#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE
#define PDC_PCI_READ_MON_TYPE 15
#define PDC_PCI_WRITE_MON_TYPE 16
/* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */
#define PDC_INITIATOR 163
#define PDC_GET_INITIATOR 0
#define PDC_SET_INITIATOR 1
#define PDC_DELETE_INITIATOR 2
#define PDC_RETURN_TABLE_SIZE 3
#define PDC_RETURN_TABLE 4
#define PDC_LINK 165 /* (sprockets) */
#define PDC_LINK_PCI_ENTRY_POINTS 0 /* list (Arg1) = 0 */
#define PDC_LINK_USB_ENTRY_POINTS 1 /* list (Arg1) = 1 */
/* cl_class
* page 3-33 of IO-Firmware ARS
* IODC ENTRY_INIT(Search first) RET[1]
*/
#define CL_NULL 0 /* invalid */
#define CL_RANDOM 1 /* random access (as disk) */
#define CL_SEQU 2 /* sequential access (as tape) */
#define CL_DUPLEX 7 /* full-duplex point-to-point (RS-232, Net) */
#define CL_KEYBD 8 /* half-duplex console (HIL Keyboard) */
#define CL_DISPL 9 /* half-duplex console (display) */
#define CL_FC 10 /* FiberChannel access media */
/* IODC ENTRY_INIT() */
#define ENTRY_INIT_SRCH_FRST 2
#define ENTRY_INIT_SRCH_NEXT 3
#define ENTRY_INIT_MOD_DEV 4
#define ENTRY_INIT_DEV 5
#define ENTRY_INIT_MOD 6
#define ENTRY_INIT_MSG 9
/* IODC ENTRY_IO() */
#define ENTRY_IO_BOOTIN 0
#define ENTRY_IO_BOOTOUT 1
#define ENTRY_IO_CIN 2
#define ENTRY_IO_COUT 3
#define ENTRY_IO_CLOSE 4
#define ENTRY_IO_GETMSG 9
#define ENTRY_IO_BBLOCK_IN 16
#define ENTRY_IO_BBLOCK_OUT 17
/* IODC ENTRY_SPA() */
/* IODC ENTRY_CONFIG() */
/* IODC ENTRY_TEST() */
/* IODC ENTRY_TLB() */
/* constants for OS (NVM...) */
#define OS_ID_NONE 0 /* Undefined OS ID */
#define OS_ID_HPUX 1 /* HP-UX OS */
#define OS_ID_MPEXL 2 /* MPE XL OS */
#define OS_ID_OSF 3 /* OSF OS */
#define OS_ID_HPRT 4 /* HP-RT OS */
#define OS_ID_NOVEL 5 /* NOVELL OS */
#define OS_ID_LINUX 6 /* Linux */
/* constants for PDC_CHASSIS */
#define OSTAT_OFF 0
#define OSTAT_FLT 1
#define OSTAT_TEST 2
#define OSTAT_INIT 3
#define OSTAT_SHUT 4
#define OSTAT_WARN 5
#define OSTAT_RUN 6
#define OSTAT_ON 7
/* Page Zero constant offsets used by the HPMC handler */
#define BOOT_CONSOLE_HPA_OFFSET 0x3c0
#define BOOT_CONSOLE_SPA_OFFSET 0x3c4
#define BOOT_CONSOLE_PATH_OFFSET 0x3a8
/* size of the pdc_result buffer for firmware.c */
#define NUM_PDC_RESULT 32
#if !defined(__ASSEMBLY__)
#ifdef __KERNEL__
#include <linux/types.h>
extern int pdc_type;
/* Values for pdc_type */
#define PDC_TYPE_ILLEGAL -1
#define PDC_TYPE_PAT 0 /* 64-bit PAT-PDC */
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
unsigned long actcnt; /* actual number of bytes returned */
unsigned long maxcnt; /* maximum number of bytes that could be returned */
};
struct pdc_coproc_cfg { /* for PDC_COPROC_CFG */
unsigned long ccr_functional;
unsigned long ccr_present;
unsigned long revision;
unsigned long model;
};
struct pdc_model { /* for PDC_MODEL */
unsigned long hversion;
unsigned long sversion;
unsigned long hw_id;
unsigned long boot_id;
unsigned long sw_id;
unsigned long sw_cap;
unsigned long arch_rev;
unsigned long pot_key;
unsigned long curr_key;
};
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
unsigned long
#ifdef CONFIG_64BIT
cc_padW:32,
#endif
cc_alias: 4, /* alias boundaries for virtual addresses */
cc_block: 4, /* to determine most efficient stride */
cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
cc_shift: 2, /* how much to shift cc_block left */
cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
cc_pad1 : 10, /* reserved */
cc_hv : 3; /* hversion dependent */
};
struct pdc_tlb_cf { /* for PDC_CACHE (I/D-TLB's) */
unsigned long tc_pad0:12, /* reserved */
#ifdef CONFIG_64BIT
tc_padW:32,
#endif
tc_sh : 2, /* 0 = separate I/D-TLB, else shared I/D-TLB */
tc_hv : 1, /* HV */
tc_page : 1, /* 0 = 2K page-size-machine, 1 = 4k page size */
tc_cst : 3, /* 0 = incoherent operations, else coherent operations */
tc_aid : 5, /* ITLB: width of access ids of processor (encoded!) */
tc_pad1 : 8; /* ITLB: width of space-registers (encoded) */
};
struct pdc_cache_info { /* main-PDC_CACHE-structure (caches & TLB's) */
/* I-cache */
unsigned long ic_size; /* size in bytes */
struct pdc_cache_cf ic_conf; /* configuration */
unsigned long ic_base; /* base-addr */
unsigned long ic_stride;
unsigned long ic_count;
unsigned long ic_loop;
/* D-cache */
unsigned long dc_size; /* size in bytes */
struct pdc_cache_cf dc_conf; /* configuration */
unsigned long dc_base; /* base-addr */
unsigned long dc_stride;
unsigned long dc_count;
unsigned long dc_loop;
/* Instruction-TLB */
unsigned long it_size; /* number of entries in I-TLB */
struct pdc_tlb_cf it_conf; /* I-TLB-configuration */
unsigned long it_sp_base;
unsigned long it_sp_stride;
unsigned long it_sp_count;
unsigned long it_off_base;
unsigned long it_off_stride;
unsigned long it_off_count;
unsigned long it_loop;
/* data-TLB */
unsigned long dt_size; /* number of entries in D-TLB */
struct pdc_tlb_cf dt_conf; /* D-TLB-configuration */
unsigned long dt_sp_base;
unsigned long dt_sp_stride;
unsigned long dt_sp_count;
unsigned long dt_off_base;
unsigned long dt_off_stride;
unsigned long dt_off_count;
unsigned long dt_loop;
};
#if 0
/* If you start using the next struct, you'll have to adjust it to
* work with 64-bit firmware I think -PB
*/
struct pdc_iodc { /* PDC_IODC */
unsigned char hversion_model;
unsigned char hversion;
unsigned char spa;
unsigned char type;
unsigned int sversion_rev:4;
unsigned int sversion_model:19;
unsigned int sversion_opt:8;
unsigned char rev;
unsigned char dep;
unsigned char features;
unsigned char pad1;
unsigned int checksum:16;
unsigned int length:16;
unsigned int pad[15];
} __attribute__((aligned(8))) ;
#endif
#ifndef CONFIG_PA20
/* no BTLBs in pa2.0 processors */
struct pdc_btlb_info_range {
__u8 res00;
__u8 num_i;
__u8 num_d;
__u8 num_comb;
};
struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
unsigned int min_size; /* minimum size of BTLB in pages */
unsigned int max_size; /* maximum size of BTLB in pages */
struct pdc_btlb_info_range fixed_range_info;
struct pdc_btlb_info_range variable_range_info;
};
#endif /* !CONFIG_PA20 */
#ifdef CONFIG_64BIT
struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
unsigned long entries_returned;
unsigned long entries_total;
};
struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
unsigned long paddr;
unsigned int pages;
unsigned int reserved;
};
#endif /* CONFIG_64BIT */
struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
unsigned long mod_addr;
unsigned long mod_pgs;
unsigned long add_addrs;
};
struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
unsigned long mod_addr;
unsigned long mod_pgs;
};
struct pdc_initiator { /* PDC_INITIATOR */
int host_id;
int factor;
int width;
int mode;
};
struct hardware_path {
char flags; /* see bit definitions below */
char bc[6]; /* Bus Converter routing info to a specific */
/* I/O adaptor (< 0 means none, > 63 resvd) */
char mod; /* fixed field of specified module */
};
/*
* Device path specifications used by PDC.
*/
struct pdc_module_path {
struct hardware_path path;
unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
};
#ifndef CONFIG_PA20
/* Only used on some pre-PA2.0 boxes */
struct pdc_memory_map { /* PDC_MEMORY_MAP */
unsigned long hpa; /* mod's register set address */
unsigned long more_pgs; /* number of additional I/O pgs */
};
#endif
struct pdc_tod {
unsigned long tod_sec;
unsigned long tod_usec;
};
/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
struct pdc_hpmc_pim_11 { /* PDC_PIM */
__u32 gr[32];
__u32 cr[32];
__u32 sr[8];
__u32 iasq_back;
__u32 iaoq_back;
__u32 check_type;
__u32 cpu_state;
__u32 rsvd1;
__u32 cache_check;
__u32 tlb_check;
__u32 bus_check;
__u32 assists_check;
__u32 rsvd2;
__u32 assist_state;
__u32 responder_addr;
__u32 requestor_addr;
__u32 path_info;
__u64 fr[32];
};
/*
* architected results from PDC_PIM/transfer hpmc on a PA2.0 machine
*
* Note that PDC_PIM doesn't care whether or not wide mode was enabled
* so the results are different on PA1.1 vs. PA2.0 when in narrow mode.
*
* Note also that there are unarchitected results available, which
* are hversion dependent. Do a "ser pim 0 hpmc" after rebooting, since
* the firmware is probably the best way of printing hversion dependent
* data.
*/
struct pdc_hpmc_pim_20 { /* PDC_PIM */
__u64 gr[32];
__u64 cr[32];
__u64 sr[8];
__u64 iasq_back;
__u64 iaoq_back;
__u32 check_type;
__u32 cpu_state;
__u32 cache_check;
__u32 tlb_check;
__u32 bus_check;
__u32 assists_check;
__u32 assist_state;
__u32 path_info;
__u64 responder_addr;
__u64 requestor_addr;
__u64 fr[32];
};
void pdc_console_init(void); /* in pdc_console.c */
void pdc_console_restart(void);
void setup_pdc(void); /* in inventory.c */
/* wrapper-functions from pdc.c */
int pdc_add_valid(unsigned long address);
int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
int pdc_chassis_disp(unsigned long disp);
int pdc_chassis_warn(unsigned long *warn);
int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info);
int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info);
int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
void *iodc_data, unsigned int iodc_data_size);
int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
struct pdc_module_path *mod_path, long mod_index);
int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
long mod_index, long addr_index);
int pdc_model_info(struct pdc_model *model);
int pdc_model_sysmodel(char *name);
int pdc_model_cpuid(unsigned long *cpu_id);
int pdc_model_versions(unsigned long *versions, int id);
int pdc_model_capabilities(unsigned long *capabilities);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
#endif /* !CONFIG_PA20 */
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count);
int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count);
int pdc_stable_get_size(unsigned long *size);
int pdc_stable_verify_contents(void);
int pdc_stable_initialize(void);
int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa);
int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);
int pdc_get_initiator(struct hardware_path *, struct pdc_initiator *);
int pdc_tod_read(struct pdc_tod *tod);
int pdc_tod_set(unsigned long sec, unsigned long usec);
#ifdef CONFIG_64BIT
int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
struct pdc_memory_table *tbl, unsigned long entries);
#endif
void set_firmware_width(void);
void set_firmware_width_unlocked(void);
int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
int pdc_do_reset(void);
int pdc_soft_power_info(unsigned long *power_reg);
int pdc_soft_power_button(int sw_control);
void pdc_io_reset(void);
void pdc_io_reset_devices(void);
int pdc_iodc_getc(void);
int pdc_iodc_print(const unsigned char *str, unsigned count);
void pdc_emergency_unlock(void);
int pdc_sti_call(unsigned long func, unsigned long flags,
unsigned long inptr, unsigned long outputr,
unsigned long glob_cfg);
static inline char * os_id_to_string(u16 os_id) {
switch(os_id) {
case OS_ID_NONE: return "No OS";
case OS_ID_HPUX: return "HP-UX";
case OS_ID_MPEXL: return "MPE-iX";
case OS_ID_OSF: return "OSF";
case OS_ID_HPRT: return "HP-RT";
case OS_ID_NOVEL: return "Novell Netware";
case OS_ID_LINUX: return "Linux";
default: return "Unknown";
}
}
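/*
 * Minimal usage sketch for the wrappers declared above (the helper name
 * below is illustrative only, not part of this interface): a caller
 * passes a result struct and checks the returned PDC status, assuming
 * the usual convention that 0 (PDC_OK) means success.
 */
static inline unsigned long pdc_example_read_tod_sec(void)
{
	struct pdc_tod tod;

	/* pdc_tod_read() fills 'tod' from firmware on success */
	return (pdc_tod_read(&tod) == 0) ? tod.tod_sec : 0;
}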
#endif /* __KERNEL__ */
#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
/* DEFINITION OF THE ZERO-PAGE (PAG0) */
/* based on work by Jason Eckhardt (jason@equator.com) */
/* flags of the device_path */
#define PF_AUTOBOOT 0x80
#define PF_AUTOSEARCH 0x40
#define PF_TIMER 0x0F
struct device_path { /* page 1-69 */
unsigned char flags; /* flags see above! */
unsigned char bc[6]; /* bus converter routing info */
unsigned char mod;
unsigned int layers[6];/* device-specific layer-info */
} __attribute__((aligned(8))) ;
struct pz_device {
struct device_path dp; /* see above */
/* struct iomod *hpa; */
unsigned int hpa; /* HPA base address */
/* char *spa; */
unsigned int spa; /* SPA base address */
/* int (*iodc_io)(struct iomod*, ...); */
unsigned int iodc_io; /* device entry point */
short pad; /* reserved */
unsigned short cl_class;/* see below */
} __attribute__((aligned(8))) ;
struct zeropage {
/* [0x000] initialize vectors (VEC) */
unsigned int vec_special; /* must be zero */
/* int (*vec_pow_fail)(void);*/
unsigned int vec_pow_fail; /* power failure handler */
/* int (*vec_toc)(void); */
unsigned int vec_toc;
unsigned int vec_toclen;
/* int (*vec_rendz)(void); */
unsigned int vec_rendz;
int vec_pow_fail_flen;
int vec_pad[10];
/* [0x040] reserved processor dependent */
int pad0[112];
/* [0x200] reserved */
int pad1[84];
/* [0x350] memory configuration (MC) */
int memc_cont; /* contiguous mem size (bytes) */
int memc_phsize; /* physical memory size */
int memc_adsize; /* additional mem size, bytes of SPA space used by PDC */
unsigned int mem_pdc_hi; /* used for 64-bit */
/* [0x360] various parameters for the boot-CPU */
/* unsigned int *mem_booterr[8]; */
unsigned int mem_booterr[8]; /* ptr to boot errors */
unsigned int mem_free; /* first location, where OS can be loaded */
/* struct iomod *mem_hpa; */
unsigned int mem_hpa; /* HPA of the boot-CPU */
/* int (*mem_pdc)(int, ...); */
unsigned int mem_pdc; /* PDC entry point */
unsigned int mem_10msec; /* number of clock ticks in 10msec */
/* [0x390] initial memory module (IMM) */
/* struct iomod *imm_hpa; */
unsigned int imm_hpa; /* HPA of the IMM */
int imm_soft_boot; /* 0 = was hard boot, 1 = was soft boot */
unsigned int imm_spa_size; /* SPA size of the IMM in bytes */
unsigned int imm_max_mem; /* bytes of mem in IMM */
/* [0x3A0] boot console, display device and keyboard */
struct pz_device mem_cons; /* description of console device */
struct pz_device mem_boot; /* description of boot device */
struct pz_device mem_kbd; /* description of keyboard device */
/* [0x430] reserved */
int pad430[116];
/* [0x600] processor dependent */
__u32 pad600[1];
__u32 proc_sti; /* pointer to STI ROM */
__u32 pad608[126];
};
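/*
 * Sketch of how the zero page is typically consumed (helper name is
 * illustrative; assumes __PAGE_OFFSET is already visible from
 * <asm/page.h>): firmware fills struct zeropage at physical address 0,
 * and the PAGE0 macro above maps it into the kernel's address space,
 * so the boot console description is reachable as PAGE0->mem_cons.
 */
static inline unsigned int pdc_example_console_hpa(void)
{
	/* HPA of the firmware-selected console device */
	return PAGE0->mem_cons.hpa;
}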
#endif /* !defined(__ASSEMBLY__) */
#endif /* _PARISC_PDC_H */

View File

@@ -0,0 +1,381 @@
/*
* include/asm-parisc/pdc_chassis.h
*
* Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
* Copyright (C) 2002 Thibaut Varene <varenet@parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* TODO: - handle processor number on SMP systems (Reporting Entity ID)
* - handle message ID
* - handle timestamps
*/
#ifndef _PARISC_PDC_CHASSIS_H
#define _PARISC_PDC_CHASSIS_H
/*
* ----------
* Prototypes
* ----------
*/
int pdc_chassis_send_status(int message);
void parisc_pdc_chassis_init(void);
/*
* -----------------
* Direct call names
* -----------------
 * They set up everything for you: the Log message and the corresponding LED state
*/
#define PDC_CHASSIS_DIRECT_BSTART 0
#define PDC_CHASSIS_DIRECT_BCOMPLETE 1
#define PDC_CHASSIS_DIRECT_SHUTDOWN 2
#define PDC_CHASSIS_DIRECT_PANIC 3
#define PDC_CHASSIS_DIRECT_HPMC 4
#define PDC_CHASSIS_DIRECT_LPMC 5
#define PDC_CHASSIS_DIRECT_DUMP 6 /* not yet implemented */
#define PDC_CHASSIS_DIRECT_OOPS 7 /* not yet implemented */
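/*
 * Sketch of how the direct call names above are meant to be used: they
 * are passed as the 'message' argument of pdc_chassis_send_status(),
 * typically once when boot starts and once when it completes (helper
 * name is illustrative only).
 */
static inline void pdc_chassis_example_boot_msgs(void)
{
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
	/* ... boot work happens here ... */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}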
/*
* ------------
* LEDs control
* ------------
* Set the three LEDs -- Run, Attn, and Fault.
*/
/* Old PDC LED control */
#define PDC_CHASSIS_DISP_DATA(v) ((unsigned long)(v) << 17)
/*
* Available PDC PAT LED states
*/
#define PDC_CHASSIS_LED_RUN_OFF (0ULL << 4)
#define PDC_CHASSIS_LED_RUN_FLASH (1ULL << 4)
#define PDC_CHASSIS_LED_RUN_ON (2ULL << 4)
#define PDC_CHASSIS_LED_RUN_NC (3ULL << 4)
#define PDC_CHASSIS_LED_ATTN_OFF (0ULL << 6)
#define PDC_CHASSIS_LED_ATTN_FLASH (1ULL << 6)
#define PDC_CHASSIS_LED_ATTN_NC (3ULL << 6) /* ATTN ON is invalid */
#define PDC_CHASSIS_LED_FAULT_OFF (0ULL << 8)
#define PDC_CHASSIS_LED_FAULT_FLASH (1ULL << 8)
#define PDC_CHASSIS_LED_FAULT_ON (2ULL << 8)
#define PDC_CHASSIS_LED_FAULT_NC (3ULL << 8)
#define PDC_CHASSIS_LED_VALID (1ULL << 10)
/*
* Valid PDC PAT LED states combinations
*/
/* System running normally */
#define PDC_CHASSIS_LSTATE_RUN_NORMAL (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* System crashed and rebooted itself successfully */
#define PDC_CHASSIS_LSTATE_RUN_CRASHREC (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* There was a system interruption that did not take the system down */
#define PDC_CHASSIS_LSTATE_RUN_SYSINT (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* System running and unexpected reboot or non-critical error detected */
#define PDC_CHASSIS_LSTATE_RUN_NCRIT (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* Executing non-OS code */
#define PDC_CHASSIS_LSTATE_NONOS (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - Executing non-OS code */
#define PDC_CHASSIS_LSTATE_NONOS_BFAIL (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* Unexpected reboot occurred - Executing non-OS code */
#define PDC_CHASSIS_LSTATE_NONOS_UNEXP (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* Executing non-OS code - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_NONOS_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - Executing non-OS code - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_BFAIL_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* Unexpected reboot/recovering - Executing non-OS code - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_UNEXP_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* Cannot execute PDC */
#define PDC_CHASSIS_LSTATE_CANNOT_PDC (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - OS not up - PDC has detected a failure that prevents boot */
#define PDC_CHASSIS_LSTATE_FATAL_BFAIL (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* No code running - Non-critical error detected (double fault situation) */
#define PDC_CHASSIS_LSTATE_NOCODE_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - OS not up - Fatal failure detected - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_FATAL_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* All other states are invalid */
/*
* --------------
* PDC Log events
* --------------
 * Here follow the bits needed to fill up the log event sent to PDC_CHASSIS
* The log message contains: Alert level, Source, Source detail,
* Source ID, Problem detail, Caller activity, Activity status,
* Caller subactivity, Reporting entity type, Reporting entity ID,
* Data type, Unique message ID and EOM.
*/
/* Alert level */
#define PDC_CHASSIS_ALERT_FORWARD (0ULL << 36) /* no failure detected */
#define PDC_CHASSIS_ALERT_SERPROC (1ULL << 36) /* service proc - no failure */
#define PDC_CHASSIS_ALERT_NURGENT (2ULL << 36) /* non-urgent operator attn */
#define PDC_CHASSIS_ALERT_BLOCKED (3ULL << 36) /* system blocked */
#define PDC_CHASSIS_ALERT_CONF_CHG (4ULL << 36) /* unexpected configuration change */
#define PDC_CHASSIS_ALERT_ENV_PB (5ULL << 36) /* boot possible, environmental pb */
#define PDC_CHASSIS_ALERT_PENDING (6ULL << 36) /* boot possible, pending failure */
#define PDC_CHASSIS_ALERT_PERF_IMP (8ULL << 36) /* boot possible, performance impaired */
#define PDC_CHASSIS_ALERT_FUNC_IMP (10ULL << 36) /* boot possible, functionality impaired */
#define PDC_CHASSIS_ALERT_SOFT_FAIL (12ULL << 36) /* software failure */
#define PDC_CHASSIS_ALERT_HANG (13ULL << 36) /* system hang */
#define PDC_CHASSIS_ALERT_ENV_FATAL (14ULL << 36) /* fatal power or environmental pb */
#define PDC_CHASSIS_ALERT_HW_FATAL (15ULL << 36) /* fatal hardware problem */
/* Source */
#define PDC_CHASSIS_SRC_NONE (0ULL << 28) /* unknown, no source stated */
#define PDC_CHASSIS_SRC_PROC (1ULL << 28) /* processor */
/* For later use ? */
#define PDC_CHASSIS_SRC_PROC_CACHE (2ULL << 28) /* processor cache*/
#define PDC_CHASSIS_SRC_PDH (3ULL << 28) /* processor dependent hardware */
#define PDC_CHASSIS_SRC_PWR (4ULL << 28) /* power */
#define PDC_CHASSIS_SRC_FAB (5ULL << 28) /* fabric connector */
#define PDC_CHASSIS_SRC_PLATi (6ULL << 28) /* platform */
#define PDC_CHASSIS_SRC_MEM (7ULL << 28) /* memory */
#define PDC_CHASSIS_SRC_IO (8ULL << 28) /* I/O */
#define PDC_CHASSIS_SRC_CELL (9ULL << 28) /* cell */
#define PDC_CHASSIS_SRC_PD (10ULL << 28) /* protected domain */
/* Source detail field */
#define PDC_CHASSIS_SRC_D_PROC (1ULL << 24) /* processor general */
/* Source ID - platform dependent */
#define PDC_CHASSIS_SRC_ID_UNSPEC (0ULL << 16)
/* Problem detail - problem source dependent */
#define PDC_CHASSIS_PB_D_PROC_NONE (0ULL << 32) /* no problem detail */
#define PDC_CHASSIS_PB_D_PROC_TIMEOUT (4ULL << 32) /* timeout */
/* Caller activity */
#define PDC_CHASSIS_CALL_ACT_HPUX_BL (7ULL << 12) /* Boot Loader */
#define PDC_CHASSIS_CALL_ACT_HPUX_PD (8ULL << 12) /* SAL_PD activities */
#define PDC_CHASSIS_CALL_ACT_HPUX_EVENT (9ULL << 12) /* SAL_EVENTS activities */
#define PDC_CHASSIS_CALL_ACT_HPUX_IO (10ULL << 12) /* SAL_IO activities */
#define PDC_CHASSIS_CALL_ACT_HPUX_PANIC (11ULL << 12) /* System panic */
#define PDC_CHASSIS_CALL_ACT_HPUX_INIT (12ULL << 12) /* System initialization */
#define PDC_CHASSIS_CALL_ACT_HPUX_SHUT (13ULL << 12) /* System shutdown */
#define PDC_CHASSIS_CALL_ACT_HPUX_WARN (14ULL << 12) /* System warning */
#define PDC_CHASSIS_CALL_ACT_HPUX_DU (15ULL << 12) /* Display_Activity() update */
/* Activity status - implementation dependent */
#define PDC_CHASSIS_ACT_STATUS_UNSPEC (0ULL << 0)
/* Caller subactivity - implementation dependent */
/* FIXME: other subactivities ? */
#define PDC_CHASSIS_CALL_SACT_UNSPEC (0ULL << 4) /* implementation dependent */
/* Reporting entity type */
#define PDC_CHASSIS_RET_GENERICOS (12ULL << 52) /* generic OSes */
#define PDC_CHASSIS_RET_IA64_NT (13ULL << 52) /* IA-64 NT */
#define PDC_CHASSIS_RET_HPUX (14ULL << 52) /* HP-UX */
#define PDC_CHASSIS_RET_DIAG (15ULL << 52) /* offline diagnostics & utilities */
/* Reporting entity ID */
#define PDC_CHASSIS_REID_UNSPEC (0ULL << 44)
/* Data type */
#define PDC_CHASSIS_DT_NONE (0ULL << 59) /* data field unused */
/* For later use ? Do we need these ? */
#define PDC_CHASSIS_DT_PHYS_ADDR (1ULL << 59) /* physical address */
#define PDC_CHASSIS_DT_DATA_EXPECT (2ULL << 59) /* expected data */
#define PDC_CHASSIS_DT_ACTUAL (3ULL << 59) /* actual data */
#define PDC_CHASSIS_DT_PHYS_LOC (4ULL << 59) /* physical location */
#define PDC_CHASSIS_DT_PHYS_LOC_EXT (5ULL << 59) /* physical location extension */
#define PDC_CHASSIS_DT_TAG (6ULL << 59) /* tag */
#define PDC_CHASSIS_DT_SYNDROME (7ULL << 59) /* syndrome */
#define PDC_CHASSIS_DT_CODE_ADDR (8ULL << 59) /* code address */
#define PDC_CHASSIS_DT_ASCII_MSG (9ULL << 59) /* ascii message */
#define PDC_CHASSIS_DT_POST (10ULL << 59) /* POST code */
#define PDC_CHASSIS_DT_TIMESTAMP (11ULL << 59) /* timestamp */
#define PDC_CHASSIS_DT_DEV_STAT (12ULL << 59) /* device status */
#define PDC_CHASSIS_DT_DEV_TYPE (13ULL << 59) /* device type */
#define PDC_CHASSIS_DT_PB_DET (14ULL << 59) /* problem detail */
#define PDC_CHASSIS_DT_ACT_LEV (15ULL << 59) /* activity level/timeout */
#define PDC_CHASSIS_DT_SER_NUM (16ULL << 59) /* serial number */
#define PDC_CHASSIS_DT_REV_NUM (17ULL << 59) /* revision number */
#define PDC_CHASSIS_DT_INTERRUPT (18ULL << 59) /* interruption information */
#define PDC_CHASSIS_DT_TEST_NUM (19ULL << 59) /* test number */
#define PDC_CHASSIS_DT_STATE_CHG (20ULL << 59) /* major changes in system state */
#define PDC_CHASSIS_DT_PROC_DEALLOC (21ULL << 59) /* processor deallocate */
#define PDC_CHASSIS_DT_RESET (30ULL << 59) /* reset type and cause */
#define PDC_CHASSIS_DT_PA_LEGACY (31ULL << 59) /* legacy PA hex chassis code */
/* System states - part of major changes in system state data field */
#define PDC_CHASSIS_SYSTATE_BSTART (0ULL << 0) /* boot start */
#define PDC_CHASSIS_SYSTATE_BCOMP (1ULL << 0) /* boot complete */
#define PDC_CHASSIS_SYSTATE_CHANGE (2ULL << 0) /* major change */
#define PDC_CHASSIS_SYSTATE_LED (3ULL << 0) /* LED change */
#define PDC_CHASSIS_SYSTATE_PANIC (9ULL << 0) /* OS Panic */
#define PDC_CHASSIS_SYSTATE_DUMP (10ULL << 0) /* memory dump */
#define PDC_CHASSIS_SYSTATE_HPMC (11ULL << 0) /* processing HPMC */
#define PDC_CHASSIS_SYSTATE_HALT (15ULL << 0) /* system halted */
/* Message ID */
#define PDC_CHASSIS_MSG_ID (0ULL << 40) /* we do not handle msg IDs atm */
/* EOM - separates log entries */
#define PDC_CHASSIS_EOM_CLEAR (0ULL << 43)
#define PDC_CHASSIS_EOM_SET (1ULL << 43)
/*
 * Preformatted well known messages
*/
/* Boot started */
#define PDC_CHASSIS_PMSG_BSTART (PDC_CHASSIS_ALERT_SERPROC | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_INIT | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_BSTART | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* Boot complete */
#define PDC_CHASSIS_PMSG_BCOMPLETE (PDC_CHASSIS_ALERT_SERPROC | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_INIT | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_BCOMP | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* Shutdown */
#define PDC_CHASSIS_PMSG_SHUTDOWN (PDC_CHASSIS_ALERT_SERPROC | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_SHUT | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_HALT | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* Panic */
#define PDC_CHASSIS_PMSG_PANIC (PDC_CHASSIS_ALERT_SOFT_FAIL | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_PANIC| \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_PANIC | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
// FIXME: extrapolated data
/* HPMC */
#define PDC_CHASSIS_PMSG_HPMC (PDC_CHASSIS_ALERT_CONF_CHG /*?*/ | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_WARN | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_HPMC | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* LPMC */
#define PDC_CHASSIS_PMSG_LPMC (PDC_CHASSIS_ALERT_BLOCKED /*?*/| \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_WARN | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_CHANGE | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
#endif /* _PARISC_PDC_CHASSIS_H */
/* vim: set ts=8 */

View File

@@ -0,0 +1,308 @@
#ifndef __PARISC_PATPDC_H
#define __PARISC_PATPDC_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>)
* Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org>
*/
#define PDC_PAT_CELL 64L /* Interface for gaining and
 * manipulating cell state within PD */
#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
#define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */
#define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */
#define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */
#define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */
#define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */
#define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */
#define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */
#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size */
#define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */
#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
#define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */
#define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */
/*
** Arg to PDC_PAT_CELL_MODULE memaddr[4]
**
** Addresses on the Merced Bus != all Runway Bus addresses.
** This is intended for programming SBA/LBA chips range registers.
*/
#define IO_VIEW 0UL
#define PA_VIEW 1UL
/* PDC_PAT_CELL_MODULE entity type values */
#define PAT_ENTITY_CA 0 /* central agent */
#define PAT_ENTITY_PROC 1 /* processor */
#define PAT_ENTITY_MEM 2 /* memory controller */
#define PAT_ENTITY_SBA 3 /* system bus adapter */
#define PAT_ENTITY_LBA 4 /* local bus adapter */
#define PAT_ENTITY_PBC 5 /* processor bus converter */
#define PAT_ENTITY_XBC 6 /* crossbar fabric connect */
#define PAT_ENTITY_RC 7 /* fabric interconnect */
/* PDC_PAT_CELL_MODULE address range type values */
#define PAT_PBNUM 0 /* PCI Bus Number */
#define PAT_LMMIO 1 /* < 4G MMIO Space */
#define PAT_GMMIO 2 /* > 4G MMIO Space */
#define PAT_NPIOP 3 /* Non Postable I/O Port Space */
#define PAT_PIOP 4 /* Postable I/O Port Space */
#define PAT_AHPA 5 /* Additional HPA Space */
#define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */
#define PAT_GNIP 7 /* GNI Reserved Space */
/* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */
#define PDC_PAT_CHASSIS_LOG 65L
#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
/* PDC PAT CPU -- CPU configuration within the protection domain */
#define PDC_PAT_CPU 67L
#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
#define PDC_PAT_CPU_ADD 2L /* Add CPU */
#define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */
#define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */
#define PDC_PAT_CPU_STOP 5L /* Stop CPU */
#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
* Cleansing Mode */
/* PDC PAT EVENT -- Platform Events */
#define PDC_PAT_EVENT 68L
#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args */
/* PDC PAT HPMC -- Cause processor to go into spin loop, and wait
* for wake up from Monarch Processor.
*/
#define PDC_PAT_HPMC 70L
#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
* will use to interrupt OS during
* machine check rendezvous */
/* parameters for PDC_PAT_HPMC_SET_PARAMS: */
#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
/* PDC PAT IO -- On-line services for I/O modules */
#define PDC_PAT_IO 71L
#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info*/
#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
/* Hardware Path */
#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from
* Physical Location */
#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
* Address from Hardware Path */
#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path
* from PCI Configuration Address */
#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */
#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table
* Size */
#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */
#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */
#define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */
#define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */
#define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */
#define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in
* Cabinet */
#define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */
/* Bay Slots in Cabinet */
#define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */
#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
/* PDC PAT MEM -- Manage memory page deallocation */
#define PDC_PAT_MEM 72L
#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
#define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */
#define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */
#define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */
#define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */
#define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */
#define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */
#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */
/* Memory Address */
#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
#define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/
#define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/
#define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/
#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
/* PDC PAT NVOLATILE -- Access Non-Volatile Memory */
#define PDC_PAT_NVOLATILE 73L
#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
/* PDC PAT PD */
#define PDC_PAT_PD 74L /* Protection Domain Info */
#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
/* PDC_PAT_PD_GET_ADDR_MAP entry types */
#define PAT_MEMORY_DESCRIPTOR 1
/* PDC_PAT_PD_GET_ADDR_MAP memory types */
#define PAT_MEMTYPE_MEMORY 0
#define PAT_MEMTYPE_FIRMWARE 4
/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
#define PAT_MEMUSE_GENERAL 0
#define PAT_MEMUSE_GI 128
#define PAT_MEMUSE_GNI 129
#ifndef __ASSEMBLY__
#include <linux/types.h>
#ifdef CONFIG_64BIT
#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
#else /* ! CONFIG_64BIT */
/* No PAT support for 32-bit kernels...sorry */
#define is_pdc_pat() (0)
#define pdc_pat_get_irt_size(num_entries, cell_num) PDC_BAD_PROC
#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
#endif /* ! CONFIG_64BIT */
struct pdc_pat_cell_num {
unsigned long cell_num;
unsigned long cell_loc;
};
struct pdc_pat_cpu_num {
unsigned long cpu_num;
unsigned long cpu_loc;
};
struct pdc_pat_pd_addr_map_entry {
unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
unsigned char reserve1[5];
unsigned char memory_type;
unsigned char memory_usage;
unsigned long paddr;
unsigned int pages; /* Length in 4K pages */
unsigned int reserve2;
unsigned long cell_map;
};
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
* ----------------------------------------------------------
* Bit 0 to 51 - conf_base_addr
* Bit 52 to 62 - reserved
 * Bit 63 - endianness bit
********************************************************************/
#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
* ----------------------------------------------------
* Bit 0 to 7 - entity type
* 0 = central agent, 1 = processor,
* 2 = memory controller, 3 = system bus adapter,
* 4 = local bus adapter, 5 = processor bus converter,
* 6 = crossbar fabric connect, 7 = fabric interconnect,
* 8 to 254 reserved, 255 = unknown.
* Bit 8 to 15 - DVI
* Bit 16 to 23 - IOC functions
* Bit 24 to 39 - reserved
* Bit 40 to 63 - mod_pages
* number of 4K pages a module occupies starting at conf_base_addr
********************************************************************/
#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
/*
** PDC_PAT_CELL_GET_INFO return block
*/
typedef struct pdc_pat_cell_info_rtn_block {
unsigned long cpu_info;
unsigned long cell_info;
unsigned long cell_location;
unsigned long reo_location;
unsigned long mem_size;
unsigned long dimm_status;
unsigned long pdc_rev;
unsigned long fabric_info0;
unsigned long fabric_info1;
unsigned long fabric_info2;
unsigned long fabric_info3;
unsigned long reserved[21];
} pdc_pat_cell_info_rtn_block_t;
/* FIXME: mod[508] should really be a union of the various mod components */
struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
unsigned long cba; /* func 0 cfg space address */
unsigned long mod_info; /* module information */
unsigned long mod_location; /* physical location of the module */
struct hardware_path mod_path; /* module path (device path - layers) */
unsigned long mod[508]; /* PAT cell module components */
} __attribute__((aligned(8))) ;
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
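/*
 * Sketch of how the cell-module call and the PAT_GET_* accessors above
 * fit together when walking the fabric (local names are illustrative;
 * real callers keep the large return block off the stack and handle
 * every PDC status code).
 */
static inline unsigned long pdc_pat_example_sba_base(unsigned long cell_loc,
						     unsigned long mod_index,
						     struct pdc_pat_cell_mod_maddr_block *mod_block)
{
	unsigned long bytecnt;

	if (pdc_pat_cell_module(&bytecnt, cell_loc, mod_index, PA_VIEW,
				mod_block) != 0)
		return 0;

	/* mod_info encodes the entity type, cba the config space base */
	if (PAT_GET_ENTITY(mod_block->mod_info) != PAT_ENTITY_SBA)
		return 0;

	return PAT_GET_CBA(mod_block->cba);
}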
/* Flag to indicate this is a PAT box...don't use this unless you
** really have to...it might go away some day.
*/
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
#endif /* __ASSEMBLY__ */
#endif /* ! __PARISC_PATPDC_H */

View File

@@ -0,0 +1,7 @@
#ifndef _PARISC_PERCPU_H
#define _PARISC_PERCPU_H
#include <asm-generic/percpu.h>
#endif

View File

@@ -0,0 +1,74 @@
#ifndef _ASM_PERF_H_
#define _ASM_PERF_H_
/* ioctls */
#define PA_PERF_ON _IO('p', 1)
#define PA_PERF_OFF _IOR('p', 2, unsigned int)
#define PA_PERF_VERSION _IOR('p', 3, int)
#define PA_PERF_DEV "perf"
#define PA_PERF_MINOR 146
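/*
 * Sketch of the intended user-space sequence for the ioctls above. The
 * "/dev/perf" path is an assumption derived from PA_PERF_DEV and
 * PA_PERF_MINOR; the open mode and error handling are illustrative.
 */
#if 0	/* user-space example, not compiled as part of this header */
#include <fcntl.h>
#include <sys/ioctl.h>

static int pa_perf_counters_on(void)
{
	int fd = open("/dev/perf", O_RDWR);

	if (fd < 0)
		return -1;
	return ioctl(fd, PA_PERF_ON);	/* start the performance counters */
}
#endif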
/* Interface types */
#define UNKNOWN_INTF 255
#define ONYX_INTF 0
#define CUDA_INTF 1
/* Common Onyx and Cuda images */
#define CPI 0
#define BUSUTIL 1
#define TLBMISS 2
#define TLBHANDMISS 3
#define PTKN 4
#define PNTKN 5
#define IMISS 6
#define DMISS 7
#define DMISS_ACCESS 8
#define BIG_CPI 9
#define BIG_LS 10
#define BR_ABORT 11
#define ISNT 12
#define QUADRANT 13
#define RW_PDFET 14
#define RW_WDFET 15
#define SHLIB_CPI 16
/* Cuda only Images */
#define FLOPS 17
#define CACHEMISS 18
#define BRANCHES 19
#define CRSTACK 20
#define I_CACHE_SPEC 21
#define MAX_CUDA_IMAGES 22
/* Onyx only Images */
#define ADDR_INV_ABORT_ALU 17
#define BRAD_STALL 18
#define CNTL_IN_PIPEL 19
#define DSNT_XFH 20
#define FET_SIG1 21
#define FET_SIG2 22
#define G7_1 23
#define G7_2 24
#define G7_3 25
#define G7_4 26
#define MPB_LABORT 27
#define PANIC 28
#define RARE_INST 29
#define RW_DFET 30
#define RW_IFET 31
#define RW_SDFET 32
#define SPEC_IFET 33
#define ST_COND0 34
#define ST_COND1 35
#define ST_COND2 36
#define ST_COND3 37
#define ST_COND4 38
#define ST_UNPRED0 39
#define ST_UNPRED1 40
#define UNPRED 41
#define GO_STORE 42
#define SHLIB_CALL 43
#define MAX_ONYX_IMAGES 44
#endif

View File

@@ -0,0 +1,149 @@
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/cache.h>
/* Allocate the top level pgd (page directory)
*
* Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
* allocate the first pmd adjacent to the pgd. This means that we can
* subtract a constant offset to get to it. The pmd and pgd sizes are
* arranged so that a single pmd covers 4GB (giving a full 64-bit
* process access to 8TB) so our lookups are effectively L2 for the
* first 4GB of the kernel (i.e. for all ILP32 processes and all the
* kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
PGD_ALLOC_ORDER);
pgd_t *actual_pgd = pgd;
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#ifdef CONFIG_64BIT
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
* pmd entry may not be cleared. */
__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
/* The first pmd entry also is marked with _PAGE_GATEWAY as
* a signal that this pmd may not be freed */
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
}
return actual_pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}
#if PT_NLEVELS == 3
/* Three Level Page Table Support for pmd's */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
return pmd;
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
return;
#endif
free_pages((unsigned long)pmd, PMD_ORDER);
}
#else
/* Two Level Page Table Support for pmd's */
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef CONFIG_64BIT
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
else
#endif
__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
if (page)
pgtable_page_ctor(page);
return page;
}
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
return pte;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
pgtable_page_dtor(pte);
pte_free_kernel(mm, page_address(pte));
}
#define check_pgt_cache() do { } while (0)
#endif

View File

@@ -0,0 +1,508 @@
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H
#include <asm-generic/4level-fixup.h>
#include <asm/fixmap.h>
#ifndef __ASSEMBLY__
/*
* we simulate an x86-style page table for the linux mm code
*/
#include <linux/mm.h> /* for vm_area_struct */
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/cache.h>
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
* PAGE_OFFSET. This operation can be relatively expensive (e.g.,
* require a hash-, or multi-level tree-lookup or something of that
* sort) but it guarantees to return TRUE only if accessing the page
* at that address does not cause an error. Note that there may be
* addresses for which kern_addr_valid() returns FALSE even though an
* access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
*
* XXX Need to implement this for parisc.
*/
#define kern_addr_valid(addr) (1)
/* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
#define set_pte(pteptr, pteval) \
do{ \
*(pteptr) = (pteval); \
} while(0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
#endif /* !__ASSEMBLY__ */
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
#ifdef CONFIG_64BIT
#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
#else
#define KERNEL_INITIAL_ORDER 23 /* 0 to 1<<23 = 8MB */
#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
#define PT_NLEVELS 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
#else
#define PT_NLEVELS 2
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER PGD_ORDER
#endif
/* Definitions for 3rd level (we use PLD here for Page Lower directory
* because PTE_SHIFT is used lower down to mean shift that has to be
* done to get usable bits out of the PTE) */
#define PLD_SHIFT PAGE_SHIFT
#define PLD_SIZE PAGE_SIZE
#define BITS_PER_PTE (PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE (1UL << BITS_PER_PTE)
/* Definitions for 2nd level */
#define pgtable_cache_init() do { } while (0)
#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define BITS_PER_PMD 0
#endif
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
/* Definitions for 1st level */
#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)
#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD PTRS_PER_PGD
#define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
/* This calculates the number of initial pages we need for the initial
* page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL (1) /* all initial PTEs fit into one page */
#endif
/*
* pgd entries used up by user/kernel:
*/
#define FIRST_USER_ADDRESS 0
/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
/* are moved together in one instruction). */
#define _PAGE_READ_BIT 31 /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT 30 /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT 29 /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */
#define _PAGE_FILE_BIT _PAGE_DIRTY_BIT /* overload this bit */
#define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
#define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */
/* for cache flushing only */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/* following macro is ok for both 32 and 64 bit. */
#define xlate_pabit(x) (31 - x)
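/* Worked example: PA-RISC numbers bits from the most significant end of
 * a 32-bit word, so xlate_pabit(_PAGE_READ_BIT) = 31 - 31 = 0 and
 * _PAGE_READ = 1 << 0 = 0x001, matching the hex value noted next to the
 * bit definition above. */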
/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT xlate_pabit(_PAGE_USER_BIT)
/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT 12
/* this is how many bits may be used by the file functions */
#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT)
#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })
#define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
* are page-aligned, we don't care about the PAGE_OFFSET bits, except
* for a few meta-information bits, so we shift the address to be
* able to effectively address 40/42/44-bits of physical address space
* depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT 31
#define _PxD_ATTACHED_BIT 30
#define _PxD_VALID_BIT 29
#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK (0xf)
#define PxD_FLAG_SHIFT (4)
#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
#ifndef __ASSEMBLY__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
/* Others seem to make this executable, I don't know if that's correct
or not. The stack is mapped this way though so this is necessary
in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)
/*
* We could have an execute only page using "gateway - promote to priv
* level 3", but that is kind of silly. So, the way things are defined
* now, we must always have read permission for pages with execute
* permission. For the fun of it we'll go ahead and support write only
* pages.
*/
/*xwr*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 __P000 /* copy on write */
#define __P011 __P001 /* copy on write */
#define __P100 PAGE_EXECREAD
#define __P101 PAGE_EXECREAD
#define __P110 __P100 /* copy on write */
#define __P111 __P101 /* copy on write */
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_WRITEONLY
#define __S011 PAGE_SHARED
#define __S100 PAGE_EXECREAD
#define __S101 PAGE_EXECREAD
#define __S110 PAGE_RWX
#define __S111 PAGE_RWX
extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
/* initial page tables for 0-8MB for kernel */
extern pte_t pg0[];
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#if PT_NLEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
* the gateway marker */
#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x) (!pmd_val(x))
#endif
#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if PT_NLEVELS == 3
if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
else
#endif
__pmd_val_set(*pmd, 0);
}
#if PT_NLEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd))
/* For 64 bit we have three level tables */
#define pgd_none(x) (!pgd_val(x))
#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#if PT_NLEVELS == 3
if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd; cannot
* free it */
return;
#endif
__pgd_val_set(*pgd, 0);
}
#else
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t * pgdp) { }
#endif
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define __mk_pte(addr,pgprot) \
({ \
pte_t __pte; \
\
pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot)); \
\
__pte; \
})
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
return pte;
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
/* Permanent address of a page. On parisc we don't have highmem. */
#define pte_pfn(x) (pte_val(x) >> PFN_PTE_SHIFT)
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
((mm)->pgd + ((address) >> PGDIR_SHIFT))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level page table.. */
#if PT_NLEVELS == 3
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
extern void paging_init (void);
/* Used for deferring calls to flush_dcache_page() */
#define PG_dcache_dirty PG_arch_1
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
/* Encode and de-code a swap entry */
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
(((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
((offset & 0x7) << 6) | \
((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
if (!pte_young(*ptep))
return 0;
return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
#else
pte_t pte = *ptep;
if (!pte_young(pte))
return 0;
set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
return 1;
#endif
}
extern spinlock_t pa_dbit_lock;
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
pte_t pte;
spin_lock(&pa_dbit_lock);
pte = old_pte = *ptep;
pte_val(pte) &= ~_PAGE_PRESENT;
pte_val(pte) |= _PAGE_FLUSH;
set_pte_at(mm,addr,ptep,pte);
spin_unlock(&pa_dbit_lock);
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
unsigned long new, old;
do {
old = pte_val(*ptep);
new = pte_val(pte_wrprotect(__pte (old)));
} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
pte_t old_pte = *ptep;
set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#endif /* !__ASSEMBLY__ */
/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K 0
#define _PAGE_SIZE_ENCODING_16K 1
#define _PAGE_SIZE_ENCODING_64K 2
#define _PAGE_SIZE_ENCODING_256K 3
#define _PAGE_SIZE_ENCODING_1M 4
#define _PAGE_SIZE_ENCODING_4M 5
#define _PAGE_SIZE_ENCODING_16M 6
#define _PAGE_SIZE_ENCODING_64M 7
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
/* We provide our own get_unmapped_area to provide cache coherency */
#define HAVE_ARCH_UNMAPPED_AREA
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>
#endif /* _PARISC_PGTABLE_H */
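Editor's note on the swap-entry encoding above: __swp_entry() keeps the 5-bit type in bits 0-4 and splits the offset across bits 6-8 and bits 11 and up, presumably so the pte flag bits that must stay meaningful for a swapped-out pte are left untouched. A minimal standalone sketch of the round trip (the DEMO_* names and the main() harness are ours for illustration, not kernel code):

/* Standalone sketch, not kernel code: mirrors the __swp_* packing above. */
#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } demo_swp_entry_t;

#define DEMO_SWP_TYPE(x)	((x).val & 0x1f)
#define DEMO_SWP_OFFSET(x)	((((x).val >> 6) & 0x7) | (((x).val >> 8) & ~0x7UL))
#define DEMO_SWP_ENTRY(type, offset) ((demo_swp_entry_t) \
	{ (type) | (((offset) & 0x7) << 6) | (((offset) & ~0x7UL) << 8) })

int main(void)
{
	unsigned long type = 0x11, offset = 0x12345;	/* type must fit in 5 bits */
	demo_swp_entry_t e = DEMO_SWP_ENTRY(type, offset);

	/* the low offset bits land at bits 6-8, the rest at bit 11 and above */
	printf("val=%#lx type=%#lx offset=%#lx\n",
	       e.val, DEMO_SWP_TYPE(e), DEMO_SWP_OFFSET(e));
	assert(DEMO_SWP_TYPE(e) == type && DEMO_SWP_OFFSET(e) == offset);
	return 0;
}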

View File

@@ -0,0 +1 @@
#include <asm-generic/poll.h>

View File

@@ -0,0 +1,129 @@
#ifndef __ARCH_PARISC_POSIX_TYPES_H
#define __ARCH_PARISC_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef int __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
/* Note these change from narrow to wide kernels */
#ifdef CONFIG_64BIT
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
#else
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
#endif
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
typedef long long __kernel_off64_t;
typedef unsigned long long __kernel_ino64_t;
#endif
typedef unsigned int __kernel_old_dev_t;
typedef struct {
int val[2];
} __kernel_fsid_t;
/* compatibility stuff */
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
#if defined(__KERNEL__)
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
}
#undef __FD_CLR
static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
}
#undef __FD_ISSET
static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
}
/*
* This will unroll the loop for the normal constant case (8 ints,
* for a 256-bit fd_set)
*/
#undef __FD_ZERO
static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
{
unsigned long *__tmp = __p->fds_bits;
int __i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
case 16:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
__tmp[ 8] = 0; __tmp[ 9] = 0;
__tmp[10] = 0; __tmp[11] = 0;
__tmp[12] = 0; __tmp[13] = 0;
__tmp[14] = 0; __tmp[15] = 0;
return;
case 8:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
return;
case 4:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
return;
}
}
__i = __FDSET_LONGS;
while (__i) {
__i--;
*__tmp = 0;
__tmp++;
}
}
#endif /* defined(__KERNEL__) */
#endif
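The __FD_* helpers above index the fd_set bit array by word (__fd / __NFDBITS) and by bit (__fd % __NFDBITS), with __FD_ZERO hand-unrolled for the common constant sizes. A small standalone sketch of the same arithmetic, assuming a 256-bit set (the demo_* names are illustrative only, not the kernel interface):

/* Standalone sketch, not kernel code. */
#include <assert.h>

#define DEMO_NFDBITS	(8 * sizeof(unsigned long))
#define DEMO_SETSIZE	256
#define DEMO_LONGS	(DEMO_SETSIZE / DEMO_NFDBITS)

struct demo_fd_set { unsigned long fds_bits[DEMO_LONGS]; };

static void demo_fd_set_bit(unsigned long fd, struct demo_fd_set *p)
{
	p->fds_bits[fd / DEMO_NFDBITS] |= 1UL << (fd % DEMO_NFDBITS);
}

static int demo_fd_isset(unsigned long fd, const struct demo_fd_set *p)
{
	return (p->fds_bits[fd / DEMO_NFDBITS] >> (fd % DEMO_NFDBITS)) & 1;
}

int main(void)
{
	struct demo_fd_set s = { { 0 } };	/* all-zero, like __FD_ZERO */

	demo_fd_set_bit(68, &s);		/* 64-bit build: word 1, bit 4 */
	assert(demo_fd_isset(68, &s) && !demo_fd_isset(69, &s));
	return 0;
}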

View File

@@ -0,0 +1,39 @@
/*
* include/asm-parisc/prefetch.h
*
* PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
* In addition, many implementations do hardware prefetching of both
* instructions and data.
*
* PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
* to gr0 but not in a way that Linux can use. If the load would cause an
* interruption (eg due to prefetching 0), it is suppressed on PA2.0
* processors, but not on 7300LC.
*
*/
#ifndef __ASM_PARISC_PREFETCH_H
#define __ASM_PARISC_PREFETCH_H
#ifndef __ASSEMBLY__
#ifdef CONFIG_PREFETCH
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *addr)
{
__asm__("ldw 0(%0), %%r0" : : "r" (addr));
}
/* LDD is a PA2.0 addition. */
#ifdef CONFIG_PA20
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *addr)
{
__asm__("ldd 0(%0), %%r0" : : "r" (addr));
}
#endif /* CONFIG_PA20 */
#endif /* CONFIG_PREFETCH */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_PREFETCH_H */
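Usage note (not part of the header): prefetch()/prefetchw() are normally issued a little ahead of the data about to be touched, for example while walking a linked list. A hedged sketch, assuming this header is in use and CONFIG_PREFETCH is enabled; the demo_* names are illustrative:

/* Illustrative only: prefetch the next node while summing the current one. */
struct demo_node {
	struct demo_node *next;
	int payload;
};

static int demo_sum(struct demo_node *n)
{
	int sum = 0;

	for (; n; n = n->next) {
		if (n->next)
			prefetch(n->next);	/* pull the next node's cache line early */
		sum += n->payload;
	}
	return sum;
}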

View File

@@ -0,0 +1,357 @@
/*
* include/asm-parisc/processor.h
*
* Copyright (C) 1994 Linus Torvalds
* Copyright (C) 2001 Grant Grundler
*/
#ifndef __ASM_PARISC_PROCESSOR_H
#define __ASM_PARISC_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <asm/prefetch.h>
#include <asm/hardware.h>
#include <asm/pdc.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/system.h>
#endif /* __ASSEMBLY__ */
#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#ifdef CONFIG_PA20
#define current_ia(x) __asm__("mfia %0" : "=r"(x))
#else /* mfia added in pa2.0 */
#define current_ia(x) __asm__("blr 0,%0\n\tnop" : "=r"(x))
#endif
#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_UNMAPPED_BASE (current->thread.map_base)
#define DEFAULT_TASK_SIZE32 (0xFFF00000UL)
#define DEFAULT_MAP_BASE32 (0x40000000UL)
#ifdef CONFIG_64BIT
#define DEFAULT_TASK_SIZE (MAX_ADDRESS-0xf000000)
#define DEFAULT_MAP_BASE (0x200000000UL)
#else
#define DEFAULT_TASK_SIZE DEFAULT_TASK_SIZE32
#define DEFAULT_MAP_BASE DEFAULT_MAP_BASE32
#endif
#ifdef __KERNEL__
/* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
* prumpf */
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX DEFAULT_TASK_SIZE
#endif
#ifndef __ASSEMBLY__
/*
* Data detected about CPUs at boot time which is the same for all CPU's.
* HP boxes are SMP - ie identical processors.
*
* FIXME: some CPU rev info may be processor specific...
*/
struct system_cpuinfo_parisc {
unsigned int cpu_count;
unsigned int cpu_hz;
unsigned int hversion;
unsigned int sversion;
enum cpu_type cpu_type;
struct {
struct pdc_model model;
unsigned long versions;
unsigned long cpuid;
unsigned long capabilities;
char sys_model_name[81]; /* PDC-ROM returns this model name */
} pdc;
const char *cpu_name; /* e.g. "PA7300LC (PCX-L2)" */
const char *family_name; /* e.g. "1.1e" */
};
/* Per CPU data structure - ie varies per CPU. */
struct cpuinfo_parisc {
unsigned long it_value; /* Interval Timer at last timer Intr */
unsigned long it_delta; /* Interval delta (tic_10ms / HZ * 100) */
unsigned long irq_count; /* number of IRQ's since boot */
unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
unsigned long cpuid; /* aka slot_number or set to NO_PROC_ID */
unsigned long hpa; /* Host Physical address */
unsigned long txn_addr; /* MMIO addr of EIR or id_eid */
#ifdef CONFIG_SMP
unsigned long pending_ipi; /* bitmap of type ipi_message_type */
unsigned long ipi_count; /* number ipi Interrupts */
#endif
unsigned long bh_count; /* number of times bh was invoked */
unsigned long prof_counter; /* per CPU profiling support */
unsigned long prof_multiplier; /* per CPU profiling support */
unsigned long fp_rev;
unsigned long fp_model;
unsigned int state;
struct parisc_device *dev;
unsigned long loops_per_jiffy;
};
extern struct system_cpuinfo_parisc boot_cpu_data;
extern struct cpuinfo_parisc cpu_data[NR_CPUS];
#define current_cpu_data cpu_data[smp_processor_id()]
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
typedef struct {
int seg;
} mm_segment_t;
#define ARCH_MIN_TASKALIGN 8
struct thread_struct {
struct pt_regs regs;
unsigned long task_size;
unsigned long map_base;
unsigned long flags;
};
/* Thread struct flags. */
#define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */
#define PARISC_UAC_SIGBUS (1UL << 1)
#define PARISC_KERNEL_DEATH (1UL << 31) /* see die_if_kernel()... */
#define PARISC_UAC_SHIFT 0
#define PARISC_UAC_MASK (PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
#define SET_UNALIGN_CTL(task,value) \
({ \
(task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
| (((value) << PARISC_UAC_SHIFT) & \
PARISC_UAC_MASK)); \
0; \
})
#define GET_UNALIGN_CTL(task,addr) \
({ \
put_user(((task)->thread.flags & PARISC_UAC_MASK) \
>> PARISC_UAC_SHIFT, (int __user *) (addr)); \
})
#define INIT_THREAD { \
.regs = { .gr = { 0, }, \
.fr = { 0, }, \
.sr = { 0, }, \
.iasq = { 0, }, \
.iaoq = { 0, }, \
.cr27 = 0, \
}, \
.task_size = DEFAULT_TASK_SIZE, \
.map_base = DEFAULT_MAP_BASE, \
.flags = 0 \
}
/*
* Return saved PC of a blocked thread. This is used by ps mostly.
*/
unsigned long thread_saved_pc(struct task_struct *t);
void show_trace(struct task_struct *task, unsigned long *stack);
/*
* Start user thread in another space.
*
* Note that we set both the iaoq and r31 to the new pc. When
* the kernel initially calls execve it will return through an
* rfi path that will use the values in the iaoq. The execve
* syscall path will return through the gateway page, and
* that uses r31 to branch to.
*
* For ELF we clear r23, because the dynamic linker uses it to pass
* the address of the finalizer function.
*
* We also initialize sr3 to an illegal value (illegal for our
* implementation, not for the architecture).
*/
typedef unsigned int elf_caddr_t;
#define start_thread_som(regs, new_pc, new_sp) do { \
unsigned long *sp = (unsigned long *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
unsigned long pc = (unsigned long)new_pc; \
/* offset pc for priv. level */ \
pc |= 3; \
\
set_fs(USER_DS); \
regs->iasq[0] = spaceid; \
regs->iasq[1] = spaceid; \
regs->iaoq[0] = pc; \
regs->iaoq[1] = pc + 4; \
regs->sr[2] = LINUX_GATEWAY_SPACE; \
regs->sr[3] = 0xffff; \
regs->sr[4] = spaceid; \
regs->sr[5] = spaceid; \
regs->sr[6] = spaceid; \
regs->sr[7] = spaceid; \
regs->gr[ 0] = USER_PSW; \
regs->gr[30] = ((new_sp)+63)&~63; \
regs->gr[31] = pc; \
\
get_user(regs->gr[26],&sp[0]); \
get_user(regs->gr[25],&sp[-1]); \
get_user(regs->gr[24],&sp[-2]); \
get_user(regs->gr[23],&sp[-3]); \
} while(0)
/* The ELF abi wants things done a "wee bit" differently than
* som does. Supporting this behavior here avoids
* having our own version of create_elf_tables.
*
* Oh, and yes, that is not a typo, we are really passing argc in r25
* and argv in r24 (rather than r26 and r25). This is because that's
* where __libc_start_main wants them.
*
* Duplicated from dl-machine.h for the benefit of readers:
*
* Our initial stack layout is rather different from everyone else's
* due to the unique PA-RISC ABI. As far as I know it looks like
* this:
----------------------------------- (user startup code creates this frame)
| 32 bytes of magic |
|---------------------------------|
| 32 bytes argument/sp save area |
|---------------------------------| (bprm->p)
| ELF auxiliary info |
| (up to 28 words) |
|---------------------------------|
| NULL |
|---------------------------------|
| Environment pointers |
|---------------------------------|
| NULL |
|---------------------------------|
| Argument pointers |
|---------------------------------| <- argv
| argc (1 word) |
|---------------------------------| <- bprm->exec (HACK!)
| N bytes of slack |
|---------------------------------|
| filename passed to execve |
|---------------------------------| (mm->env_end)
| env strings |
|---------------------------------| (mm->env_start, mm->arg_end)
| arg strings |
|---------------------------------|
| additional faked arg strings if |
| we're invoked via binfmt_script |
|---------------------------------| (mm->arg_start)
stack base is at TASK_SIZE - rlim_max.
on downward growing arches, it looks like this:
stack base at TASK_SIZE
| filename passed to execve
| env strings
| arg strings
| faked arg strings
| slack
| ELF
| envps
| argvs
| argc
* The pleasant part of this is that if we need to skip arguments we
* can just decrement argc and move argv, because the stack pointer
* is utterly unrelated to the location of the environment and
* argument vectors.
*
* Note that the S/390 people took the easy way out and hacked their
* GCC to make the stack grow downwards.
*
* Final Note: For entry from syscall, the W (wide) bit of the PSW
* is stuffed into the lowest bit of the user sp (%r30), so we fill
* it in here from the current->personality
*/
#ifdef CONFIG_64BIT
#define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT))
#else
#define USER_WIDE_MODE 0
#endif
#define start_thread(regs, new_pc, new_sp) do { \
elf_addr_t *sp = (elf_addr_t *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
elf_addr_t pc = (elf_addr_t)new_pc | 3; \
elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \
\
set_fs(USER_DS); \
regs->iasq[0] = spaceid; \
regs->iasq[1] = spaceid; \
regs->iaoq[0] = pc; \
regs->iaoq[1] = pc + 4; \
regs->sr[2] = LINUX_GATEWAY_SPACE; \
regs->sr[3] = 0xffff; \
regs->sr[4] = spaceid; \
regs->sr[5] = spaceid; \
regs->sr[6] = spaceid; \
regs->sr[7] = spaceid; \
regs->gr[ 0] = USER_PSW | (USER_WIDE_MODE ? PSW_W : 0); \
regs->fr[ 0] = 0LL; \
regs->fr[ 1] = 0LL; \
regs->fr[ 2] = 0LL; \
regs->fr[ 3] = 0LL; \
regs->gr[30] = (((unsigned long)sp + 63) &~ 63) | (USER_WIDE_MODE ? 1 : 0); \
regs->gr[31] = pc; \
\
get_user(regs->gr[25], (argv - 1)); \
regs->gr[24] = (long) argv; \
regs->gr[23] = 0; \
} while(0)
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm);
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
#define cpu_relax() barrier()
/* Used as a macro to identify the combined VIPT/PIPT cached
* CPUs which require a guarantee of coherency (no inequivalent
* aliases with different data, whether clean or not) to operate */
static inline int parisc_requires_coherency(void)
{
#ifdef CONFIG_PA8X00
return (boot_cpu_data.cpu_type == mako) ||
(boot_cpu_data.cpu_type == mako2);
#else
return 0;
#endif
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
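The PARISC_UAC_* bits and the SET_UNALIGN_CTL()/GET_UNALIGN_CTL() macros above back the generic PR_SET_UNALIGN/PR_GET_UNALIGN prctl interface, letting a process pick SIGBUS or quiet emulation for unaligned accesses. A userspace sketch (illustrative only; error handling kept minimal):

/* Userspace sketch, not kernel code. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int ctl = 0;

	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
		perror("PR_SET_UNALIGN");
	if (prctl(PR_GET_UNALIGN, &ctl) == 0)
		printf("unaligned-access control: %#x\n", ctl);
	return 0;
}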

View File

@@ -0,0 +1,62 @@
#ifndef _PARISC_PSW_H
#define _PARISC_PSW_H
#define PSW_I 0x00000001
#define PSW_D 0x00000002
#define PSW_P 0x00000004
#define PSW_Q 0x00000008
#define PSW_R 0x00000010
#define PSW_F 0x00000020
#define PSW_G 0x00000040 /* PA1.x only */
#define PSW_O 0x00000080 /* PA2.0 only */
/* ssm/rsm instructions number PSW_W and PSW_E differently */
#define PSW_SM_I PSW_I /* Enable External Interrupts */
#define PSW_SM_D PSW_D
#define PSW_SM_P PSW_P
#define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */
#define PSW_SM_R PSW_R /* Enable Recover Counter Trap */
#define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */
#define PSW_SM_QUIET PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I
#define PSW_CB 0x0000ff00
#define PSW_M 0x00010000
#define PSW_V 0x00020000
#define PSW_C 0x00040000
#define PSW_B 0x00080000
#define PSW_X 0x00100000
#define PSW_N 0x00200000
#define PSW_L 0x00400000
#define PSW_H 0x00800000
#define PSW_T 0x01000000
#define PSW_S 0x02000000
#define PSW_E 0x04000000
#define PSW_W 0x08000000 /* PA2.0 only */
#define PSW_W_BIT 36 /* PA2.0 only */
#define PSW_Z 0x40000000 /* PA1.x only */
#define PSW_Y 0x80000000 /* PA1.x only */
#ifdef CONFIG_64BIT
# define PSW_HI_CB 0x000000ff /* PA2.0 only */
#endif
#ifdef CONFIG_64BIT
# define USER_PSW_HI_MASK PSW_HI_CB
# define WIDE_PSW PSW_W
#else
# define WIDE_PSW 0
#endif
/* Used when setting up for rfi */
#define KERNEL_PSW (WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D)
#define REAL_MODE_PSW (WIDE_PSW | PSW_Q)
#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
#define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
#endif

View File

@@ -0,0 +1,68 @@
#ifndef _PARISC_PTRACE_H
#define _PARISC_PTRACE_H
/* written by Philipp Rumpf, Copyright (C) 1999 SuSE GmbH Nuernberg
** Copyright (C) 2000 Grant Grundler, Hewlett-Packard
*/
#include <linux/types.h>
/* This struct defines the way the registers are stored on the
* stack during a system call.
*
* N.B. gdb/strace care about the size and offsets within this
* structure. If you change things, you may break object compatibility
* for those applications.
*/
struct pt_regs {
unsigned long gr[32]; /* PSW is in gr[0] */
__u64 fr[32];
unsigned long sr[ 8];
unsigned long iasq[2];
unsigned long iaoq[2];
unsigned long cr27;
unsigned long pad0; /* available for other uses */
unsigned long orig_r28;
unsigned long ksp;
unsigned long kpc;
unsigned long sar; /* CR11 */
unsigned long iir; /* CR19 */
unsigned long isr; /* CR20 */
unsigned long ior; /* CR21 */
unsigned long ipsw; /* CR22 */
};
/*
* The numbers chosen here are somewhat arbitrary but absolutely MUST
* not overlap with any of the number assigned in <linux/ptrace.h>.
*
* These ones are taken from IA-64 on the assumption that theirs are
* the most correct (and we also want to support PTRACE_SINGLEBLOCK
* since we have taken branch traps too)
*/
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
#ifdef __KERNEL__
#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
#define __ARCH_WANT_COMPAT_SYS_PTRACE
struct task_struct;
#define arch_has_single_step() 1
void user_disable_single_step(struct task_struct *task);
void user_enable_single_step(struct task_struct *task);
#define arch_has_block_step() 1
void user_enable_block_step(struct task_struct *task);
/* XXX should we use iaoq[1] or iaoq[0] ? */
#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
unsigned long profile_pc(struct pt_regs *);
extern void show_regs(struct pt_regs *);
#endif
#endif

View File

@@ -0,0 +1,5 @@
#ifndef _PARISC_REAL_H
#define _PARISC_REAL_H
#endif

View File

@@ -0,0 +1,7 @@
#ifndef _ASM_PARISC_RESOURCE_H
#define _ASM_PARISC_RESOURCE_H
#define _STK_LIM_MAX (10 * _STK_LIM)
#include <asm-generic/resource.h>
#endif

View File

@@ -0,0 +1,322 @@
#ifndef _ASM_PARISC_ROPES_H_
#define _ASM_PARISC_ROPES_H_
#include <asm/parisc-device.h>
#ifdef CONFIG_64BIT
/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
#define ZX1_SUPPORT
#endif
#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance */
#undef SBA_COLLECT_STATS
#endif
/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT 16
#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
unsigned long iovp_mask; /* help convert IOVA to IOVP */
#endif
unsigned long *res_hint; /* next avail IOVP - circular search */
spinlock_t res_lock;
unsigned int res_bitshift; /* from the LEFT! */
unsigned int res_size; /* size of resource map in bytes */
#ifdef SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int hint_shift_pdir;
#endif
#if DELAYED_RESOURCE_CNT > 0
int saved_cnt;
struct sba_dma_pair {
dma_addr_t iova;
size_t size;
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef SBA_COLLECT_STATS
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
unsigned long used_pages;
unsigned long msingle_calls;
unsigned long msingle_pages;
unsigned long msg_calls;
unsigned long msg_pages;
unsigned long usingle_calls;
unsigned long usingle_pages;
unsigned long usg_calls;
unsigned long usg_pages;
#endif
/* STUFF We don't need in performance path */
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
};
struct sba_device {
struct sba_device *next; /* list of SBA's in system */
struct parisc_device *dev; /* dev found in bus walk */
const char *name;
void __iomem *sba_hpa; /* base address */
spinlock_t sba_lock;
unsigned int flags; /* state/functionality enabled */
unsigned int hw_rev; /* HW revision of chip */
struct resource chip_resv; /* MMIO reserved for chip */
struct resource iommu_resv; /* MMIO reserved for iommu */
unsigned int num_ioc; /* number of on-board IOC's */
struct ioc ioc[MAX_IOC];
};
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
#define REOG_MERCED_PORT 0x805
#define PLUTO_MCKINLEY_PORT 0x880
static inline int IS_ASTRO(struct parisc_device *d) {
return d->id.hversion == ASTRO_RUNWAY_PORT;
}
static inline int IS_IKE(struct parisc_device *d) {
return d->id.hversion == IKE_MERCED_PORT;
}
static inline int IS_PLUTO(struct parisc_device *d) {
return d->id.hversion == PLUTO_MCKINLEY_PORT;
}
#define PLUTO_IOVA_BASE (1UL*1024*1024*1024) /* 1GB */
#define PLUTO_IOVA_SIZE (1UL*1024*1024*1024) /* 1GB */
#define PLUTO_GART_SIZE (PLUTO_IOVA_SIZE / 2)
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
/* Ike's IOC's occupy functions 2 and 3 */
#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
#define IOC_CTRL 0x8 /* IOC_CTRL offset */
#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM (1 << 8) /* Real Mode */
#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
/*
** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
** Firmware programs this stuff. Don't touch it.
*/
#define LMMIO_DIRECT0_BASE 0x300
#define LMMIO_DIRECT0_MASK 0x308
#define LMMIO_DIRECT0_ROUTE 0x310
#define LMMIO_DIST_BASE 0x360
#define LMMIO_DIST_MASK 0x368
#define LMMIO_DIST_ROUTE 0x370
#define IOS_DIST_BASE 0x390
#define IOS_DIST_MASK 0x398
#define IOS_DIST_ROUTE 0x3A0
#define IOS_DIRECT_BASE 0x3C0
#define IOS_DIRECT_MASK 0x3C8
#define IOS_DIRECT_ROUTE 0x3D0
/*
** Offsets into I/O TLB (Function 2 and 3 on Ike)
*/
#define ROPE0_CTL 0x200 /* "regbus pci0" */
#define ROPE1_CTL 0x208
#define ROPE2_CTL 0x210
#define ROPE3_CTL 0x218
#define ROPE4_CTL 0x220
#define ROPE5_CTL 0x228
#define ROPE6_CTL 0x230
#define ROPE7_CTL 0x238
#define IOC_ROPE0_CFG 0x500 /* pluto only */
#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
#define HF_ENABLE 0x40
#define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308
#define IOC_PCOM 0x310
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** It's safer (avoid memory corruption) to keep DMA page mappings
** equivalently sized to VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
*/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK
#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
#define SBA_PERF_MASK1 0x718
#define SBA_PERF_MASK2 0x730
/*
** Offsets into PCI Performance Counters (functions 12 and 13)
** Controlled by PERF registers in function 2 & 3 respectively.
*/
#define SBA_PERF_CNT1 0x200
#define SBA_PERF_CNT2 0x208
#define SBA_PERF_CNT3 0x210
/*
** lba_device: Per instance Elroy data structure
*/
struct lba_device {
struct pci_hba_data hba;
spinlock_t lba_lock;
void *iosapic_obj;
#ifdef CONFIG_64BIT
void __iomem *iop_base; /* PA_VIEW - for IO port accessor funcs */
#endif
int flags; /* state/functionality enabled */
int hw_rev; /* HW revision of chip */
};
#define ELROY_HVERS 0x782
#define MERCURY_HVERS 0x783
#define QUICKSILVER_HVERS 0x784
static inline int IS_ELROY(struct parisc_device *d) {
return (d->id.hversion == ELROY_HVERS);
}
static inline int IS_MERCURY(struct parisc_device *d) {
return (d->id.hversion == MERCURY_HVERS);
}
static inline int IS_QUICKSILVER(struct parisc_device *d) {
return (d->id.hversion == QUICKSILVER_HVERS);
}
static inline int agp_mode_mercury(void __iomem *hpa) {
u64 bus_mode;
bus_mode = readl(hpa + 0x0620);
if (bus_mode & 1)
return 1;
return 0;
}
/*
** I/O SAPIC init function
** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC.
** Call setup as part of per instance initialization.
** (ie *not* init_module() function unless only one is present.)
** fixup_irq is to initialize PCI IRQ line support and
** virtualize pcidev->irq value. To be called by pci_fixup_bus().
*/
extern void *iosapic_register(unsigned long hpa);
extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
#define LBA_FUNC_ID 0x0000 /* function id */
#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define LBA_CAPABLE 0x0030 /* capabilities register */
#define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
#define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
#define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
#define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
#define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
#define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
#define LBA_ARB_PRI 0x0088 /* firmware sets this. */
#define LBA_ARB_MODE 0x0090 /* firmware sets this. */
#define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
#define LBA_STAT_CTL 0x0108 /* Status & Control */
#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
#define LBA_LMMIO_MASK 0x0208
#define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
#define LBA_GMMIO_MASK 0x0218
#define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
#define LBA_WLMMIO_MASK 0x0228
#define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
#define LBA_WGMMIO_MASK 0x0238
#define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
#define LBA_IOS_MASK 0x0248
#define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
#define LBA_ELMMIO_MASK 0x0258
#define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
#define LBA_EIOS_MASK 0x0268
#define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
#define LBA_DMA_CTL 0x0278 /* firmware sets this */
#define LBA_IBASE 0x0300 /* SBA DMA support */
#define LBA_IMASK 0x0308
/* FIXME: ignore DMA Hint stuff until we can measure performance */
#define LBA_HINT_CFG 0x0310
#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
#define LBA_BUS_MODE 0x0620
/* ERROR regs are needed for config cycle kluges */
#define LBA_ERROR_CONFIG 0x0680
#define LBA_SMART_MODE 0x20
#define LBA_ERROR_STATUS 0x0688
#define LBA_ROPE_CTL 0x06A0
#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
#endif /*_ASM_PARISC_ROPES_H_*/

View File

@@ -0,0 +1,23 @@
#ifndef _ASM_PARISC_RT_SIGFRAME_H
#define _ASM_PARISC_RT_SIGFRAME_H
#define SIGRETURN_TRAMP 4
#define SIGRESTARTBLOCK_TRAMP 5
#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
struct rt_sigframe {
/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
Secondary to that it must protect the ERESTART_RESTARTBLOCK
trampoline we left on the stack (we were bad and didn't
change sp so we could run really fast.) */
unsigned int tramp[TRAMP_SIZE];
struct siginfo info;
struct ucontext uc;
};
#define SIGFRAME 128
#define FUNCTIONCALLFRAME 96
#define PARISC_RT_SIGFRAME_SIZE \
(((sizeof(struct rt_sigframe) + FUNCTIONCALLFRAME) + SIGFRAME) & -SIGFRAME)
#endif
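Worked example of the PARISC_RT_SIGFRAME_SIZE arithmetic above, using a made-up sizeof(struct rt_sigframe) of 584 bytes purely for illustration (the real size depends on the siginfo/ucontext layout):

/* Standalone sketch, not kernel code. */
#include <stdio.h>

#define DEMO_SIGFRAME	128
#define DEMO_CALLFRAME	96

int main(void)
{
	unsigned long sz = 584;	/* assumed sizeof(struct rt_sigframe), illustration only */
	unsigned long frame = ((sz + DEMO_CALLFRAME) + DEMO_SIGFRAME) & -DEMO_SIGFRAME;

	/* 584 + 96 + 128 = 808, masked down to the 128-byte boundary: 768 */
	printf("%lu -> %lu\n", sz, frame);
	return 0;
}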

View File

@@ -0,0 +1,131 @@
/*
* include/asm-parisc/rtc.h
*
* Copyright 2002 Randolph Chung <tausq@debian.org>
*
* Based on: include/asm-ppc/rtc.h and the genrtc driver in the
* 2.4 parisc linux tree
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#ifdef __KERNEL__
#include <linux/rtc.h>
#include <asm/pdc.h>
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
#define RTC_BATT_BAD 0x100 /* battery bad */
/* some dummy definitions */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
# define __isleap(year) \
((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
/* How many days come before each month (0-12). */
static const unsigned short int __mon_yday[2][13] =
{
/* Normal years. */
{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
/* Leap years. */
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
static inline unsigned int get_rtc_time(struct rtc_time *wtime)
{
struct pdc_tod tod_data;
long int days, rem, y;
const unsigned short int *ip;
memset(wtime, 0, sizeof(*wtime));
if (pdc_tod_read(&tod_data) < 0)
return RTC_24H | RTC_BATT_BAD;
// most of the remainder of this function is:
// Copyright (C) 1991, 1993, 1997, 1998 Free Software Foundation, Inc.
// This was originally a part of the GNU C Library.
// It is distributed under the GPL, and was swiped from offtime.c
days = tod_data.tod_sec / SECS_PER_DAY;
rem = tod_data.tod_sec % SECS_PER_DAY;
wtime->tm_hour = rem / SECS_PER_HOUR;
rem %= SECS_PER_HOUR;
wtime->tm_min = rem / 60;
wtime->tm_sec = rem % 60;
y = 1970;
#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
while (days < 0 || days >= (__isleap (y) ? 366 : 365))
{
/* Guess a corrected year, assuming 365 days per year. */
long int yg = y + days / 365 - (days % 365 < 0);
/* Adjust DAYS and Y to match the guessed year. */
days -= ((yg - y) * 365
+ LEAPS_THRU_END_OF (yg - 1)
- LEAPS_THRU_END_OF (y - 1));
y = yg;
}
wtime->tm_year = y - 1900;
ip = __mon_yday[__isleap(y)];
for (y = 11; days < (long int) ip[y]; --y)
continue;
days -= ip[y];
wtime->tm_mon = y;
wtime->tm_mday = days + 1;
return RTC_24H;
}
static int set_rtc_time(struct rtc_time *wtime)
{
u_int32_t secs;
secs = mktime(wtime->tm_year + 1900, wtime->tm_mon + 1, wtime->tm_mday,
wtime->tm_hour, wtime->tm_min, wtime->tm_sec);
if(pdc_tod_set(secs, 0) < 0)
return -1;
else
return 0;
}
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
#endif /* __KERNEL__ */
#endif /* __ASM_RTC_H__ */
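The epoch-seconds to calendar-date conversion in get_rtc_time() above is the classic glibc offtime algorithm. A standalone sketch that mirrors it for one known input (demo_* names are ours, not kernel code; DIV is simplified for non-negative years):

/* Standalone sketch, not kernel code. */
#include <assert.h>
#include <stdio.h>

#define DEMO_SECS_PER_DAY	(24 * 60 * 60)
#define demo_isleap(y)		((y) % 4 == 0 && ((y) % 100 != 0 || (y) % 400 == 0))
#define DEMO_LEAPS_THRU(y)	((y) / 4 - (y) / 100 + (y) / 400)

static const unsigned short demo_mon_yday[2][13] = {
	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 },
};

int main(void)
{
	long secs = 1000000000L;		/* expect 2001-09-09 01:46:40 UTC */
	long days = secs / DEMO_SECS_PER_DAY;
	long rem = secs % DEMO_SECS_PER_DAY;
	long y = 1970, m;

	while (days < 0 || days >= (demo_isleap(y) ? 366 : 365)) {
		/* guess a year assuming 365-day years, then correct for leap days */
		long yg = y + days / 365 - (days % 365 < 0);

		days -= (yg - y) * 365
			+ DEMO_LEAPS_THRU(yg - 1) - DEMO_LEAPS_THRU(y - 1);
		y = yg;
	}
	for (m = 11; days < (long) demo_mon_yday[demo_isleap(y)][m]; --m)
		continue;
	days -= demo_mon_yday[demo_isleap(y)][m];

	printf("%ld-%02ld-%02ld %02ld:%02ld:%02ld\n", y, m + 1, days + 1,
	       rem / 3600, (rem % 3600) / 60, rem % 60);
	assert(y == 2001 && m == 8 && days + 1 == 9);
	return 0;
}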

View File

@@ -0,0 +1,12 @@
#ifndef ASM_PARISC_RUNWAY_H
#define ASM_PARISC_RUNWAY_H
#ifdef __KERNEL__
/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_runway_root;
#define RUNWAY_STATUS 0x10
#define RUNWAY_DEBUG 0x40
#endif /* __KERNEL__ */
#endif /* ASM_PARISC_RUNWAY_H */

View File

@@ -0,0 +1,27 @@
#ifndef _ASM_PARISC_SCATTERLIST_H
#define _ASM_PARISC_SCATTERLIST_H
#include <asm/page.h>
#include <asm/types.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
/* an IOVA can be 64 bits on some PA-RISC platforms. */
dma_addr_t iova; /* I/O Virtual Address */
__u32 iova_length; /* bytes mapped */
};
#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
#define sg_dma_address(sg) ((sg)->iova)
#define sg_dma_len(sg) ((sg)->iova_length)
#define ISA_DMA_THRESHOLD (~0UL)
#endif /* _ASM_PARISC_SCATTERLIST_H */

View File

@@ -0,0 +1,12 @@
#ifndef _PARISC_SECTIONS_H
#define _PARISC_SECTIONS_H
/* nothing to see, move along */
#include <asm-generic/sections.h>
#ifdef CONFIG_64BIT
#undef dereference_function_descriptor
void *dereference_function_descriptor(void *);
#endif
#endif

View File

@@ -0,0 +1,6 @@
#ifndef __PARISC_SEGMENT_H
#define __PARISC_SEGMENT_H
/* Only here because we have some old header files that expect it.. */
#endif

View File

@@ -0,0 +1,29 @@
#ifndef _PARISC_SEMBUF_H
#define _PARISC_SEMBUF_H
/*
* The semid64_ds structure for parisc architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#ifndef CONFIG_64BIT
unsigned int __pad1;
#endif
__kernel_time_t sem_otime; /* last semop time */
#ifndef CONFIG_64BIT
unsigned int __pad2;
#endif
__kernel_time_t sem_ctime; /* last change time */
unsigned int sem_nsems; /* no. of semaphores in array */
unsigned int __unused1;
unsigned int __unused2;
};
#endif /* _PARISC_SEMBUF_H */

View File

@@ -0,0 +1,10 @@
/*
* include/asm-parisc/serial.h
*/
/*
* This is used for 16550-compatible UARTs
*/
#define BASE_BAUD ( 1843200 / 16 )
#define SERIAL_PORT_DFNS

View File

@@ -0,0 +1,6 @@
#ifndef _PARISC_SETUP_H
#define _PARISC_SETUP_H
#define COMMAND_LINE_SIZE 1024
#endif /* _PARISC_SETUP_H */

View File

@@ -0,0 +1,58 @@
#ifndef _PARISC_SHMBUF_H
#define _PARISC_SHMBUF_H
/*
* The shmid64_ds structure for parisc architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#ifndef CONFIG_64BIT
unsigned int __pad1;
#endif
__kernel_time_t shm_atime; /* last attach time */
#ifndef CONFIG_64BIT
unsigned int __pad2;
#endif
__kernel_time_t shm_dtime; /* last detach time */
#ifndef CONFIG_64BIT
unsigned int __pad3;
#endif
__kernel_time_t shm_ctime; /* last change time */
#ifndef CONFIG_64BIT
unsigned int __pad4;
#endif
size_t shm_segsz; /* size of segment (bytes) */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned int shm_nattch; /* no. of current attaches */
unsigned int __unused1;
unsigned int __unused2;
};
#ifdef CONFIG_64BIT
/* The 'unsigned int' (formerly 'unsigned long') data types below will
* ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
* a wide kernel, but if some of these values are meant to contain pointers
* they may need to be 'long long' instead. -PB XXX FIXME
*/
#endif
struct shminfo64 {
unsigned int shmmax;
unsigned int shmmin;
unsigned int shmmni;
unsigned int shmseg;
unsigned int shmall;
unsigned int __unused1;
unsigned int __unused2;
unsigned int __unused3;
unsigned int __unused4;
};
#endif /* _PARISC_SHMBUF_H */

View File

@@ -0,0 +1,8 @@
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
#define __ARCH_FORCE_SHMLBA 1
#define SHMLBA 0x00400000 /* attach addr needs to be 4 MB aligned */
#endif /* _ASMPARISC_SHMPARAM_H */
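With __ARCH_FORCE_SHMLBA in effect, a caller-supplied shmat() attach address normally has to be SHMLBA (4 MB) aligned unless SHM_RND is passed. A tiny illustrative helper for rounding a hint address up to that boundary (editor's sketch, standard power-of-two rounding, not kernel code):

#define DEMO_SHMLBA	0x00400000UL

static unsigned long demo_shm_align(unsigned long hint)
{
	return (hint + DEMO_SHMLBA - 1) & ~(DEMO_SHMLBA - 1);	/* round up to 4 MB */
}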

View File

@@ -0,0 +1,20 @@
#ifndef _ASMPARISC_SIGCONTEXT_H
#define _ASMPARISC_SIGCONTEXT_H
#define PARISC_SC_FLAG_ONSTACK 1<<0
#define PARISC_SC_FLAG_IN_SYSCALL 1<<1
/* We will add more stuff here as it becomes necessary, until we know
it works. */
struct sigcontext {
unsigned long sc_flags;
unsigned long sc_gr[32]; /* PSW in sc_gr[0] */
unsigned long long sc_fr[32]; /* FIXME, do we need other state info? */
unsigned long sc_iasq[2];
unsigned long sc_iaoq[2];
unsigned long sc_sar; /* cr11 */
};
#endif

View File

@@ -0,0 +1,9 @@
#ifndef _PARISC_SIGINFO_H
#define _PARISC_SIGINFO_H
#include <asm-generic/siginfo.h>
#undef NSIGTRAP
#define NSIGTRAP 4
#endif

View File

@@ -0,0 +1,153 @@
#ifndef _ASM_PARISC_SIGNAL_H
#define _ASM_PARISC_SIGNAL_H
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGEMT 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGBUS 10
#define SIGSEGV 11
#define SIGSYS 12 /* Linux doesn't use this */
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGUSR1 16
#define SIGUSR2 17
#define SIGCHLD 18
#define SIGPWR 19
#define SIGVTALRM 20
#define SIGPROF 21
#define SIGIO 22
#define SIGPOLL SIGIO
#define SIGWINCH 23
#define SIGSTOP 24
#define SIGTSTP 25
#define SIGCONT 26
#define SIGTTIN 27
#define SIGTTOU 28
#define SIGURG 29
#define SIGLOST 30 /* Linux doesn't use this either */
#define SIGUNUSED 31
#define SIGRESERVE SIGUNUSED
#define SIGXCPU 33
#define SIGXFSZ 34
#define SIGSTKFLT 36
/* These should not be considered constants from userland. */
#define SIGRTMIN 37
#define SIGRTMAX _NSIG /* it's 44 under HP-UX */
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_ONSTACK 0x00000001
#define SA_RESETHAND 0x00000004
#define SA_NOCLDSTOP 0x00000008
#define SA_SIGINFO 0x00000010
#define SA_NODEFER 0x00000020
#define SA_RESTART 0x00000040
#define SA_NOCLDWAIT 0x00000080
#define _SA_SIGGFAULT 0x00000100 /* HPUX */
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#ifdef __KERNEL__
#define _NSIG 64
/* bits-per-word, where word apparently means 'long' not 'int' */
#define _NSIG_BPW BITS_PER_LONG
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
#endif /* __KERNEL__ */
#define SIG_BLOCK 0 /* for blocking signals */
#define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
# ifndef __ASSEMBLY__
# include <linux/types.h>
/* Avoid too many header ordering problems. */
struct siginfo;
/* Type of a signal handler. */
#ifdef CONFIG_64BIT
/* function pointers on 64-bit parisc are pointers to little structs and the
* compiler doesn't support code which changes or tests the address of
* the function in the little struct. This is really ugly -PB
*/
typedef char __user *__sighandler_t;
#else
typedef void __signalfn_t(int);
typedef __signalfn_t __user *__sighandler_t;
#endif
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
typedef unsigned long old_sigset_t; /* at least 32 bits */
typedef struct {
/* next_signal() assumes this is a long - no choice */
unsigned long sig[_NSIG_WORDS];
} sigset_t;
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
sigset_t sa_mask; /* mask last for extensibility */
};
struct k_sigaction {
struct sigaction sa;
};
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#include <asm/sigcontext.h>
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY */
#endif /* _ASM_PARISC_SIGNAL_H */

View File

@@ -0,0 +1,68 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#if defined(CONFIG_SMP)
/* Page Zero Location PDC will look for the address to branch to when we poke
** slave CPUs still in "Icache loop".
*/
#define PDC_OS_BOOT_RENDEZVOUS 0x10
#define PDC_OS_BOOT_RENDEZVOUS_HI 0x28
#ifndef ASSEMBLY
#include <linux/bitops.h>
#include <linux/threads.h> /* for NR_CPUS */
#include <linux/cpumask.h>
typedef unsigned long address_t;
extern cpumask_t cpu_online_map;
/*
* Private routines/data
*
* physical and logical are equivalent until we support CPU hotplug.
*/
#define cpu_number_map(cpu) (cpu)
#define cpu_logical_map(cpu) (cpu)
extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi(cpumask_t mask);
#endif /* !ASSEMBLY */
/*
* This magic constant controls our willingness to transfer
* a process across CPUs. Such a transfer incurs cache and tlb
* misses. The current value is inherited from i386. Still needs
* to be tuned for parisc.
*/
#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
extern unsigned long cpu_present_mask;
#define raw_smp_processor_id() (current_thread_info()->cpu)
#else /* CONFIG_SMP */
static inline void smp_send_all_nop(void) { return; }
#endif
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
static inline int __cpu_disable (void) {
return 0;
}
static inline void __cpu_die (unsigned int cpu) {
while(1)
;
}
extern int __cpu_up (unsigned int cpu);
#endif /* __ASM_SMP_H */

View File

@@ -0,0 +1,62 @@
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H
#include <asm/sockios.h>
/* For setsockopt(2) */
#define SOL_SOCKET 0xffff
#define SO_DEBUG 0x0001
#define SO_REUSEADDR 0x0004
#define SO_KEEPALIVE 0x0008
#define SO_DONTROUTE 0x0010
#define SO_BROADCAST 0x0020
#define SO_LINGER 0x0080
#define SO_OOBINLINE 0x0100
/* To add :#define SO_REUSEPORT 0x0200 */
#define SO_SNDBUF 0x1001
#define SO_RCVBUF 0x1002
#define SO_SNDBUFFORCE 0x100a
#define SO_RCVBUFFORCE 0x100b
#define SO_SNDLOWAT 0x1003
#define SO_RCVLOWAT 0x1004
#define SO_SNDTIMEO 0x1005
#define SO_RCVTIMEO 0x1006
#define SO_ERROR 0x1007
#define SO_TYPE 0x1008
#define SO_PEERNAME 0x2000
#define SO_NO_CHECK 0x400b
#define SO_PRIORITY 0x400c
#define SO_BSDCOMPAT 0x400e
#define SO_PASSCRED 0x4010
#define SO_PEERCRED 0x4011
#define SO_TIMESTAMP 0x4012
#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_TIMESTAMPNS 0x4013
#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x4016
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x4017
#define SO_SECURITY_ENCRYPTION_NETWORK 0x4018
#define SO_BINDTODEVICE 0x4019
/* Socket filtering */
#define SO_ATTACH_FILTER 0x401a
#define SO_DETACH_FILTER 0x401b
#define SO_ACCEPTCONN 0x401c
#define SO_PEERSEC 0x401d
#define SO_PASSSEC 0x401e
#define SO_MARK 0x401f
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
#define SOCK_NONBLOCK 0x40000000
#endif /* _ASM_SOCKET_H */
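Usage note: SOCK_NONBLOCK is OR'ed into the type argument of socket(2), which is why its value must stay clear of the bits used for the socket types; on most architectures it simply equals O_NONBLOCK, but the parisc O_NONBLOCK value collides with those bits, hence the 0x40000000 here. A userspace sketch (illustrative only):

/* Userspace sketch, not kernel code. */
#include <sys/socket.h>

int demo_nonblocking_socket(void)
{
	/* type | SOCK_NONBLOCK avoids a separate fcntl(F_SETFL, O_NONBLOCK) call */
	return socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
}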

View File

@@ -0,0 +1,13 @@
#ifndef __ARCH_PARISC_SOCKIOS__
#define __ARCH_PARISC_SOCKIOS__
/* Socket-level I/O control calls. */
#define FIOSETOWN 0x8901
#define SIOCSPGRP 0x8902
#define FIOGETOWN 0x8903
#define SIOCGPGRP 0x8904
#define SIOCATMARK 0x8905
#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
#endif

View File

@@ -0,0 +1,194 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
if (flags & PSW_SM_I) {
local_irq_enable();
cpu_relax();
local_irq_disable();
} else
cpu_relax();
mb();
}
static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
*a = 1;
mb();
}
static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
mb();
return ret;
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Linux rwlocks are unfair to writers; they can be starved for an indefinite
* time by readers. With care, they can also be taken in interrupt context.
*
* In the PA-RISC implementation, we have a spinlock and a counter.
* Readers use the lock to serialise their access to the counter (which
* records how many readers currently hold the lock).
* Writers hold the spinlock, preventing any readers or other writers from
* grabbing the rwlock.
*/
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
rw->counter++;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
rw->counter--;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
if (__raw_spin_trylock(&rw->lock)) {
rw->counter++;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
local_irq_restore(flags);
/* If write-locked, we fail to acquire the lock */
if (rw->counter < 0)
return 0;
/* Wait until we have a realistic chance at the lock */
while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
cpu_relax();
goto retry;
}
rw->counter = -1; /* mark as write-locked */
mb();
local_irq_restore(flags);
}
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
rw->counter = 0;
__raw_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
local_irq_save(flags);
if (__raw_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
__raw_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
return result;
}
/*
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
return rw->counter >= 0;
}
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
return !rw->counter;
}
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
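To illustrate the rwlock protocol described above (a spinlock guarding a counter, counter > 0 for readers and -1 for a writer, who keeps holding the lock), here is the same scheme expressed portably with a pthread mutex standing in for the raw spinlock. This is an editor's sketch of the protocol, not the kernel implementation:

#include <pthread.h>
#include <sched.h>

struct demo_rwlock {
	pthread_mutex_t lock;	/* stands in for the raw spinlock */
	volatile int counter;	/* > 0: that many readers, -1: write-locked, 0: free */
};

#define DEMO_RWLOCK_INIT	{ PTHREAD_MUTEX_INITIALIZER, 0 }

static void demo_read_lock(struct demo_rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);	/* blocks while a writer holds the lock */
	rw->counter++;
	pthread_mutex_unlock(&rw->lock);
}

static void demo_read_unlock(struct demo_rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);
	rw->counter--;
	pthread_mutex_unlock(&rw->lock);
}

static void demo_write_lock(struct demo_rwlock *rw)
{
	for (;;) {
		pthread_mutex_lock(&rw->lock);
		if (rw->counter == 0)
			break;			/* keep holding the mutex */
		pthread_mutex_unlock(&rw->lock);
		while (rw->counter != 0)
			sched_yield();		/* the kernel cpu_relax()es here */
	}
	rw->counter = -1;			/* mark write-locked */
}

static void demo_write_unlock(struct demo_rwlock *rw)
{
	rw->counter = 0;
	pthread_mutex_unlock(&rw->lock);
}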

View File

@@ -0,0 +1,21 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
} raw_spinlock_t;
typedef struct {
raw_spinlock_t lock;
volatile int counter;
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
#endif

Some files were not shown because too many files have changed in this diff.