Merge branch 'master' of hera.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6
* 'master' of hera.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6: (29 commits)
  [PARISC] fix uninitialized variable warning in asm/rtc.h
  [PARISC] Port checkstack.pl to parisc
  [PARISC] Make palo target work when $obj != $src
  [PARISC] Zap unused variable warnings in pci.c
  [PARISC] Fix tests in palo target
  [PARISC] Fix palo target
  [PARISC] Restore palo target
  [PARISC] Attempt to clean up parisc/Makefile
  [PARISC] Fix infinite loop in /proc/iomem
  [PARISC] Quiet sysfs_create_link __must_check warnings in pdc_stable
  [PARISC] Squelch pci_enable_device __must_check warning in superio
  [PARISC] Kill off broken irqstack code
  [PARISC] Remove hardcoded uses of PAGE_SIZE
  [PARISC] Clean up pointless ASM_PAGE_SIZE_DIV use
  [PARISC] Kill off the last vestiges of ASM_PAGE_SIZE
  [PARISC] Kill off ASM_PAGE_SIZE use
  [PARISC] Beautify parisc vmlinux.lds.S
  [PARISC] Clean up a resource_size_t warning in sba_iommu
  [PARISC] Kill incorrect cast warning in unwinder
  [PARISC] Kill zone_to_nid printk warning
  ...

Fixed trivial conflict in include/asm-parisc/tlbflush.h manually
@@ -290,9 +290,6 @@ int main(void)
 	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
 	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
-	DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
-	DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
-	DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
 	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));

@@ -98,7 +98,6 @@
  * The "get_stack" macros are responsible for determining the
  * kernel stack value.
  *
- * For Faults:
  *      If sr7 == 0
  *          Already using a kernel stack, so call the
  *          get_stack_use_r30 macro to push a pt_regs structure
@@ -110,26 +109,6 @@
  *          task pointer pointed to by cr30.  Set the stack
  *          pointer to point to the end of the task structure.
  *
- * For Interrupts:
- *      If sr7 == 0
- *          Already using a kernel stack, check to see if r30
- *          is already pointing to the per processor interrupt
- *          stack.  If it is, call the get_stack_use_r30 macro
- *          to push a pt_regs structure on the stack, and store
- *          registers there.  Otherwise, call get_stack_use_cr31
- *          to get a pointer to the base of the interrupt stack
- *          and push a pt_regs structure on that stack.
- *      else
- *          Need to set up a kernel stack, so call the
- *          get_stack_use_cr30 macro to set up a pointer
- *          to the pt_regs structure contained within the
- *          task pointer pointed to by cr30.  Set the stack
- *          pointer to point to the end of the task structure.
- *          N.B: We don't use the interrupt stack for the
- *          first interrupt from userland, because signals/
- *          resched's are processed when returning to userland,
- *          and we can sleep in those cases.
- *
  * Note that we use shadowed registers for temps until
  * we can save %r26 and %r29.  %r26 is used to preserve
  * %r8 (a shadowed register) which temporarily contained

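The comment block above (together with the interrupt-stack text that this merge removes from it) describes how the trap entry path picks a stack: a trap taken while already in kernel mode (sr7 == 0) keeps pushing onto the current %r30 stack, while a trap taken from user space switches to the kernel stack embedded in the task structure that %cr30 points at. A minimal C sketch of that policy follows; the names and the stack-size constant are illustrative assumptions, not taken from the kernel source.

/* Sketch only: the real code is the get_stack_use_r30 / get_stack_use_cr30
 * assembly macros in entry.S; 16 KiB is an assumed stack size. */
#define FAKE_KSTACK_SIZE	(16 * 1024)

struct fake_task {
	unsigned char stack[FAKE_KSTACK_SIZE];	/* kernel stack grows upward on parisc */
};

static unsigned long pick_entry_sp(unsigned long sr7,
				   struct fake_task *cr30_task,
				   unsigned long r30_sp)
{
	if (sr7 == 0)
		return r30_sp;			/* already on a kernel stack */
	return (unsigned long)(cr30_task + 1);	/* stack at the end of the task structure */
}

The deleted text added a third case for external interrupts, where %cr31 pointed at a separate per-processor interrupt stack; that code was never enabled (it sat under "#if 0") and the hunks below remove it.
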
@@ -652,7 +631,7 @@
 	.text

-	.align 4096
+	.align PAGE_SIZE

ENTRY(fault_vector_20)
 	/* First vector is invalid (0) */

@@ -904,7 +883,7 @@ ENDPROC(_switch_to)
 	 *
 	 */

-	.align 4096
+	.align PAGE_SIZE

ENTRY(syscall_exit_rfi)
 	mfctl	%cr30,%r16

@@ -1086,23 +1065,13 @@ intr_do_preempt:
 intr_extint:
 	CMPIB=,n 0,%r16,1f
+
 	get_stack_use_cr30
-	b,n 3f
+	b,n 2f

 1:
-#if 0  /* Interrupt Stack support not working yet! */
-	mfctl	%cr31,%r1
-	copy	%r30,%r17
-	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-	DEPI	0,31,15,%r17
-	CMPB=,n	%r1,%r17,2f
-	get_stack_use_cr31
-	b,n 3f
-#endif
-2:
 	get_stack_use_r30

-3:
+2:
 	save_specials	%r29
 	virt_map
 	save_general	%r29

@@ -95,7 +95,7 @@ $bss_loop:

 1:
 	stw	%r3,0(%r4)
-	ldo	(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+	ldo	(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 	addib,>	-1,%r1,1b
 #if PT_NLEVELS == 3
 	ldo	ASM_PMD_ENTRY_SIZE(%r4),%r4

@@ -128,10 +128,6 @@ $pgt_fill_loop:
 	/* And the stack pointer too */
 	ldo	THREAD_SZ_ALGN(%r6),%sp

-	/* And the interrupt stack */
-	load32	interrupt_stack,%r6
-	mtctl	%r6,%cr31
-
 #ifdef CONFIG_SMP
 	/* Set the smp rendevous address into page zero.
 	** It would be safer to do this in init_smp_config() but

@@ -55,13 +55,13 @@
 	 * IODC requires 7K byte stack.  That leaves 1K byte for os_hpmc.
 	 */

-	.align 4096
+	.align PAGE_SIZE
hpmc_stack:
 	.block 16384

#define HPMC_IODC_BUF_SIZE 0x8000

-	.align 4096
+	.align PAGE_SIZE
hpmc_iodc_buf:
 	.block HPMC_IODC_BUF_SIZE

@@ -49,7 +49,6 @@ EXPORT_SYMBOL(init_mm);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-unsigned char interrupt_stack[ISTACK_SIZE] __attribute__ ((section("init_istack"), aligned(4096)));
 union thread_union init_thread_union
 	__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
 	{ INIT_THREAD_INFO(init_task) };

@@ -289,7 +289,7 @@ ENTRY(copy_user_page_asm)
 	 */

 	ldd	0(%r25), %r19
-	ldi	ASM_PAGE_SIZE_DIV128, %r1
+	ldi	(PAGE_SIZE / 128), %r1

 	ldw	64(%r25), %r0		/* prefetch 1 cacheline ahead */
 	ldw	128(%r25), %r0		/* prefetch 2 */

@@ -355,7 +355,7 @@ ENTRY(copy_user_page_asm)
 	 * use ldd/std on a 32 bit kernel.
 	 */
 	ldw	0(%r25), %r19
-	ldi	ASM_PAGE_SIZE_DIV64, %r1
+	ldi	(PAGE_SIZE / 64), %r1

 1:
 	ldw	4(%r25), %r20

@@ -553,7 +553,7 @@ ENTRY(__clear_user_page_asm)
 	pdtlb	0(%r28)

 #ifdef CONFIG_64BIT
-	ldi	ASM_PAGE_SIZE_DIV128, %r1
+	ldi	(PAGE_SIZE / 128), %r1

 	/* PREFETCH (Write) has not (yet) been proven to help here */
 	/* #define PREFETCHW_OP	ldd	256(%0), %r0 */

@@ -578,7 +578,7 @@ ENTRY(__clear_user_page_asm)
 	ldo	128(%r28), %r28

 #else	/* ! CONFIG_64BIT */
-	ldi	ASM_PAGE_SIZE_DIV64, %r1
+	ldi	(PAGE_SIZE / 64), %r1

 1:
 	stw	%r0, 0(%r28)

@@ -122,31 +122,9 @@ EXPORT_SYMBOL($$divI_12);
 EXPORT_SYMBOL($$divI_14);
 EXPORT_SYMBOL($$divI_15);

-extern void __ashrdi3(void);
-extern void __ashldi3(void);
-extern void __lshrdi3(void);
-extern void __muldi3(void);
-
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-
 asmlinkage void * __canonicalize_funcptr_for_compare(void *);
 EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);

-#ifdef CONFIG_64BIT
-extern void __divdi3(void);
-extern void __udivdi3(void);
-extern void __umoddi3(void);
-extern void __moddi3(void);
-
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__moddi3);
-#endif
-
 #ifndef CONFIG_64BIT
 extern void $$dyncall(void);
 EXPORT_SYMBOL($$dyncall);

@@ -569,11 +569,10 @@ static void *fail_alloc_consistent(struct device *dev, size_t size,
 static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flag)
 {
-	void *addr = NULL;
+	void *addr;

-	/* rely on kmalloc to be cacheline aligned */
-	addr = kmalloc(size, flag);
-	if(addr)
+	addr = (void *)__get_free_pages(flag, get_order(size));
+	if (addr)
 		*dma_handle = (dma_addr_t)virt_to_phys(addr);

 	return addr;

@@ -582,7 +581,7 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
 static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
 		void *vaddr, dma_addr_t iova)
 {
-	kfree(vaddr);
+	free_pages((unsigned long)vaddr, get_order(size));
 	return;
 }

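After the two hunks above, the noncoherent allocate and free paths both work in whole pages: the buffer comes from __get_free_pages() and goes back via free_pages(), with both sides deriving the allocation order from the same byte count. (The removed version relied on kmalloc() returning cache-line-aligned memory instead.) A minimal sketch of that pairing, with illustrative helper names:

#include <linux/types.h>
#include <linux/gfp.h>
#include <asm/page.h>

static void *pages_alloc(size_t size, gfp_t flag)
{
	/* get_order() rounds size up to a power-of-two number of pages,
	 * e.g. 5000 bytes -> order 1 (two pages, assuming 4 KiB pages). */
	return (void *)__get_free_pages(flag, get_order(size));
}

static void pages_free(void *addr, size_t size)
{
	/* The order must match the one used at allocation time, which is
	 * why both helpers compute it from the same size argument. */
	free_pages((unsigned long)addr, get_order(size));
}
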
@@ -194,37 +194,13 @@ void __init pcibios_init_bus(struct pci_bus *bus)
 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
 }

-
-/* KLUGE: Link the child and parent resources - generic PCI didn't */
-static void
-pcibios_link_hba_resources( struct resource *hba_res, struct resource *r)
-{
-	if (!r->parent) {
-		printk(KERN_EMERG "PCI: resource not parented! [%p-%p]\n",
-			(void*) r->start, (void*) r->end);
-		r->parent = hba_res;
-
-		/* reverse link is harder *sigh* */
-		if (r->parent->child) {
-			if (r->parent->sibling) {
-				struct resource *next = r->parent->sibling;
-				while (next->sibling)
-					next = next->sibling;
-				next->sibling = r;
-			} else {
-				r->parent->sibling = r;
-			}
-		} else
-			r->parent->child = r;
-	}
-}
-
 /* called by drivers/pci/setup-bus.c:pci_setup_bridge(). */
 void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
 		struct pci_bus_region *region, struct resource *res)
 {
-	struct pci_bus *bus = dev->bus;
-	struct pci_hba_data *hba = HBA_DATA(bus->bridge->platform_data);
+#ifdef CONFIG_64BIT
+	struct pci_hba_data *hba = HBA_DATA(dev->bus->bridge->platform_data);
+#endif

 	if (res->flags & IORESOURCE_IO) {
 		/*

@@ -243,23 +219,15 @@ void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
 	}

 	DBG_RES("pcibios_resource_to_bus(%02x %s [%lx,%lx])\n",
-		bus->number, res->flags & IORESOURCE_IO ? "IO" : "MEM",
+		dev->bus->number, res->flags & IORESOURCE_IO ? "IO" : "MEM",
 		region->start, region->end);
-
-	/* KLUGE ALERT
-	** if this resource isn't linked to a "parent", then it seems
-	** to be a child of the HBA - lets link it in.
-	*/
-	pcibios_link_hba_resources(&hba->io_space, bus->resource[0]);
-	pcibios_link_hba_resources(&hba->lmmio_space, bus->resource[1]);
 }

 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
 		struct pci_bus_region *region)
 {
 #ifdef CONFIG_64BIT
-	struct pci_bus *bus = dev->bus;
-	struct pci_hba_data *hba = HBA_DATA(bus->bridge->platform_data);
+	struct pci_hba_data *hba = HBA_DATA(dev->bus->bridge->platform_data);
 #endif

 	if (res->flags & IORESOURCE_MEM) {

@@ -82,7 +82,12 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 	unsigned long cpuid;
 	struct cpuinfo_parisc *p;

-#ifndef CONFIG_SMP
+#ifdef CONFIG_SMP
+	if (num_online_cpus() >= NR_CPUS) {
+		printk(KERN_INFO "num_online_cpus() >= NR_CPUS\n");
+		return 1;
+	}
+#else
 	if (boot_cpu_data.cpu_count > 0) {
 		printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
 		return 1;

@@ -432,22 +432,10 @@ smp_cpu_init(int cpunum)
 void __init smp_callin(void)
 {
 	int slave_id = cpu_now_booting;
-#if 0
-	void *istack;
-#endif

 	smp_cpu_init(slave_id);
 	preempt_disable();

-#if 0 /* NOT WORKING YET - see entry.S */
-	istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
-	if (istack == NULL) {
-	    printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n",slave_id);
-	    BUG();
-	}
-	mtctl(istack,31);
-#endif
-
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);

@@ -473,3 +473,10 @@ long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
 	return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
 				  buf, len);
 }
+
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+				u32 lenhi, u32 lenlo)
+{
+	return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+			     ((loff_t)lenhi << 32) | lenlo);
+}

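The compat_sys_fallocate() wrapper added above exists because the 32-bit compat ABI passes each 64-bit loff_t as two 32-bit halves, so the wrapper stitches them back together before calling sys_fallocate(). A small stand-alone illustration of that recombination (plain user-space C, not kernel code):

#include <stdint.h>
#include <stdio.h>

static int64_t join64(uint32_t hi, uint32_t lo)
{
	/* Widen before shifting so the high half is not truncated. */
	return ((int64_t)hi << 32) | lo;
}

int main(void)
{
	/* hi = 0x1, lo = 0x80000000 -> 0x180000000 = 6442450944 (6 GiB) */
	printf("%lld\n", (long long)join64(0x1, 0x80000000u));
	return 0;
}
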
@@ -10,6 +10,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
+#include <asm/page.h>
 #include <asm/psw.h>
 #include <asm/thread_info.h>
 #include <asm/assembly.h>

@@ -38,7 +39,7 @@
 	 * pointers.
 	 */

-	.align ASM_PAGE_SIZE
+	.align PAGE_SIZE
ENTRY(linux_gateway_page)

 	/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */

@@ -597,7 +598,7 @@ cas_action:


 	/* Make sure nothing else is placed on this page */
-	.align ASM_PAGE_SIZE
+	.align PAGE_SIZE
END(linux_gateway_page)
ENTRY(end_linux_gateway_page)

@@ -608,7 +609,7 @@ ENTRY(end_linux_gateway_page)
 	.section .rodata,"a"

-	.align ASM_PAGE_SIZE
+	.align PAGE_SIZE
 	/* Light-weight-syscall table */
 	/* Start of lws table. */
ENTRY(lws_table)

@@ -617,13 +618,13 @@ ENTRY(lws_table)
END(lws_table)
 	/* End of lws table */

-	.align ASM_PAGE_SIZE
+	.align PAGE_SIZE
ENTRY(sys_call_table)
#include "syscall_table.S"
END(sys_call_table)

#ifdef CONFIG_64BIT
-	.align ASM_PAGE_SIZE
+	.align PAGE_SIZE
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"

@@ -636,7 +637,7 @@ END(sys_call_table64)
 	   will use this set of locks
 	 */
 	.section .data
-	.align 4096
+	.align PAGE_SIZE
ENTRY(lws_lock_start)
 	/* lws locks */
 	.align 16

@@ -403,6 +403,7 @@
 	ENTRY_COMP(signalfd)
 	ENTRY_COMP(timerfd)
 	ENTRY_SAME(eventfd)
+	ENTRY_COMP(fallocate)		/* 305 */

 	/* Nothing yet */

@@ -189,16 +189,14 @@ static struct clocksource clocksource_cr16 = {
 #ifdef CONFIG_SMP
 int update_cr16_clocksource(void)
 {
-	int change = 0;
-
 	/* since the cr16 cycle counters are not synchronized across CPUs,
 	   we'll check if we should switch to a safe clocksource: */
 	if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
 		clocksource_change_rating(&clocksource_cr16, 0);
-		change = 1;
+		return 1;
 	}

-	return change;
+	return 0;
 }
 #else
 int update_cr16_clocksource(void)

@@ -209,8 +209,8 @@ static int unwind_init(void)

 static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
 {
-	void handle_interruption(int, struct pt_regs *);
-	static unsigned long *hi = (unsigned long)&handle_interruption;
+	extern void handle_interruption(int, struct pt_regs *);
+	static unsigned long *hi = (unsigned long *)&handle_interruption;

 	if (pc == get_func_addr(hi)) {
 		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);

@@ -46,168 +46,211 @@ jiffies = jiffies_64;
 #endif
 SECTIONS
 {
-  . = KERNEL_BINARY_TEXT_START;
+	. = KERNEL_BINARY_TEXT_START;

-  _text = .;			/* Text and read-only data */
-  .text ALIGN(16) : {
-	TEXT_TEXT
-	SCHED_TEXT
-	LOCK_TEXT
-	*(.text.do_softirq)
-	*(.text.sys_exit)
-	*(.text.do_sigaltstack)
-	*(.text.do_fork)
-	*(.text.*)
-	*(.fixup)
-	*(.lock.text)		/* out-of-line lock text */
-	*(.gnu.warning)
+	_text = .;		/* Text and read-only data */
+	.text ALIGN(16) : {
+		TEXT_TEXT
+		SCHED_TEXT
+		LOCK_TEXT
+		*(.text.do_softirq)
+		*(.text.sys_exit)
+		*(.text.do_sigaltstack)
+		*(.text.do_fork)
+		*(.text.*)
+		*(.fixup)
+		*(.lock.text)		/* out-of-line lock text */
+		*(.gnu.warning)
 	} = 0
+	/* End of text section */
+	_etext = .;

-  _etext = .;			/* End of text section */
+	RODATA
+	BUG_TABLE

-  RODATA
-  BUG_TABLE
-
-  /* writeable */
-  . = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
-				that we can properly leave these
-				as writable */
-  data_start = .;
-
-  . = ALIGN(16);		/* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
-
-  NOTES
-
-  __start___unwind = .;		/* unwind info */
-  .PARISC.unwind : { *(.PARISC.unwind) }
-  __stop___unwind = .;
-
-  /* rarely changed data like cpu maps */
-  . = ALIGN(16);
-  .data.read_mostly : { *(.data.read_mostly) }
-
-  . = ALIGN(L1_CACHE_BYTES);
-  .data : {			/* Data */
-	DATA_DATA
-	CONSTRUCTORS
+	/* writeable */
+	/* Make sure this is page aligned so
+	 * that we can properly leave these
+	 * as writable
+	 */
+	. = ALIGN(PAGE_SIZE);
+	data_start = .;
+	. = ALIGN(16);
+	/* Exception table */
+	__ex_table : {
+		__start___ex_table = .;
+		*(__ex_table)
+		__stop___ex_table = .;
 	}

-  . = ALIGN(L1_CACHE_BYTES);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+	NOTES

-  /* PA-RISC locks requires 16-byte alignment */
-  . = ALIGN(16);
-  .data.lock_aligned : { *(.data.lock_aligned) }
-
-  . = ALIGN(ASM_PAGE_SIZE);
-  /* nosave data is really only used for software suspend...it's here
-   * just in case we ever implement it */
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(ASM_PAGE_SIZE);
-  __nosave_end = .;
-
-  _edata = .;			/* End of data section */
-
-  __bss_start = .;		/* BSS */
-  /* page table entries need to be PAGE_SIZE aligned */
-  . = ALIGN(ASM_PAGE_SIZE);
-  .data.vmpages : {
-	*(.data.vm0.pmd)
-	*(.data.vm0.pgd)
-	*(.data.vm0.pte)
+	/* unwind info */
+	.PARISC.unwind : {
+		__start___unwind = .;
+		*(.PARISC.unwind)
+		__stop___unwind = .;
 	}

-  .bss : { *(.bss) *(COMMON) }
-  __bss_stop = .;
+	/* rarely changed data like cpu maps */
+	. = ALIGN(16);
+	.data.read_mostly : {
+		*(.data.read_mostly)
+	}

+	. = ALIGN(L1_CACHE_BYTES);
+	/* Data */
+	.data : {
+		DATA_DATA
+		CONSTRUCTORS
+	}
+
+	. = ALIGN(L1_CACHE_BYTES);
+	.data.cacheline_aligned : {
+		*(.data.cacheline_aligned)
+	}
+
+	/* PA-RISC locks requires 16-byte alignment */
+	. = ALIGN(16);
+	.data.lock_aligned : {
+		*(.data.lock_aligned)
+	}
+
+	/* nosave data is really only used for software suspend...it's here
+	 * just in case we ever implement it
+	 */
+	. = ALIGN(PAGE_SIZE);
+	__nosave_begin = .;
+	.data_nosave : {
+		*(.data.nosave)
+	}
+	. = ALIGN(PAGE_SIZE);
+	__nosave_end = .;
+
+	/* End of data section */
+	_edata = .;
+
+	/* BSS */
+	__bss_start = .;
+	/* page table entries need to be PAGE_SIZE aligned */
+	. = ALIGN(PAGE_SIZE);
+	.data.vmpages : {
+		*(.data.vm0.pmd)
+		*(.data.vm0.pgd)
+		*(.data.vm0.pte)
+	}
+	.bss : {
+		*(.bss)
+		*(COMMON)
+	}
+	__bss_stop = .;

-  /* assembler code expects init_task to be 16k aligned */
-  . = ALIGN(16384);		/* init_task */
-  .data.init_task : { *(.data.init_task) }
-
-  /* The interrupt stack is currently partially coded, but not yet
-   * implemented */
-  . = ALIGN(16384);
-  init_istack : { *(init_istack) }
+	/* assembler code expects init_task to be 16k aligned */
+	. = ALIGN(16384);
+	/* init_task */
+	.data.init_task : {
+		*(.data.init_task)
+	}

 #ifdef CONFIG_64BIT
-  . = ALIGN(16);		/* Linkage tables */
-  .opd : { *(.opd) } PROVIDE (__gp = .);
-  .plt : { *(.plt) }
-  .dlt : { *(.dlt) }
+	. = ALIGN(16);
+	/* Linkage tables */
+	.opd : {
+		*(.opd)
+	} PROVIDE (__gp = .);
+	.plt : {
+		*(.plt)
+	}
+	.dlt : {
+		*(.dlt)
+	}
 #endif

-  /* reserve space for interrupt stack by aligning __init* to 16k */
-  . = ALIGN(16384);
-  __init_begin = .;
-  .init.text : {
-	_sinittext = .;
-	*(.init.text)
-	_einittext = .;
-  }
-  .init.data : { *(.init.data) }
-  . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
-  .initcall.init : {
-	INITCALLS
-  }
-  __initcall_end = .;
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  SECURITY_INIT
-  /* alternate instruction replacement.  This is a mechanism x86 uses
-   * to detect the CPU type and replace generic instruction sequences
-   * with CPU specific ones.  We don't currently do this in PA, but
-   * it seems like a good idea... */
-  . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
-  .altinstr_replacement : { *(.altinstr_replacement) }
-  /* .exit.text is discard at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
-  .exit.data : { *(.exit.data) }
+	/* reserve space for interrupt stack by aligning __init* to 16k */
+	. = ALIGN(16384);
+	__init_begin = .;
+	.init.text : {
+		_sinittext = .;
+		*(.init.text)
+		_einittext = .;
+	}
+	.init.data : {
+		*(.init.data)
+	}
+	. = ALIGN(16);
+	.init.setup : {
+		__setup_start = .;
+		*(.init.setup)
+		__setup_end = .;
+	}
+	.initcall.init : {
+		__initcall_start = .;
+		INITCALLS
+		__initcall_end = .;
+	}
+	.con_initcall.init : {
+		__con_initcall_start = .;
+		*(.con_initcall.init)
+		__con_initcall_end = .;
+	}
+	SECURITY_INIT
+
+	/* alternate instruction replacement.  This is a mechanism x86 uses
+	 * to detect the CPU type and replace generic instruction sequences
+	 * with CPU specific ones.  We don't currently do this in PA, but
+	 * it seems like a good idea...
+	 */
+	. = ALIGN(4);
+	.altinstructions : {
+		__alt_instructions = .;
+		*(.altinstructions)
+		__alt_instructions_end = .;
+	}
+	.altinstr_replacement : {
+		*(.altinstr_replacement)
+	}
+
+	/* .exit.text is discard at runtime, not link time, to deal with references
+	 * from .altinstructions and .eh_frame
+	 */
+	.exit.text : {
+		*(.exit.text)
+	}
+	.exit.data : {
+		*(.exit.data)
+	}
 #ifdef CONFIG_BLK_DEV_INITRD
-  . = ALIGN(ASM_PAGE_SIZE);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
+	. = ALIGN(PAGE_SIZE);
+	.init.ramfs : {
+		__initramfs_start = .;
+		*(.init.ramfs)
+		__initramfs_end = .;
+	}
 #endif

-  PERCPU(ASM_PAGE_SIZE)
+	PERCPU(PAGE_SIZE)
+	. = ALIGN(PAGE_SIZE);
+	__init_end = .;
+	/* freed after init ends here */
+	_end = . ;

-  . = ALIGN(ASM_PAGE_SIZE);
-  __init_end = .;
-  /* freed after init ends here */
-  _end = . ;
-
-  /* Sections to be discarded */
-  /DISCARD/ : {
-	*(.exitcall.exit)
+	/* Sections to be discarded */
+	/DISCARD/ : {
+		*(.exitcall.exit)
 #ifdef CONFIG_64BIT
-	/* temporary hack until binutils is fixed to not emit these
-	   for static binaries */
-	*(.interp)
-	*(.dynsym)
-	*(.dynstr)
-	*(.dynamic)
-	*(.hash)
-	*(.gnu.hash)
+		/* temporary hack until binutils is fixed to not emit these
+		 * for static binaries
+		 */
+		*(.interp)
+		*(.dynsym)
+		*(.dynstr)
+		*(.dynamic)
+		*(.hash)
+		*(.gnu.hash)
 #endif
 	}

-  STABS_DEBUG
-  .note 0 : { *(.note) }
+	STABS_DEBUG
+	.note 0 : { *(.note) }
 }