Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (27 commits)
  arch/tile: support newer binutils assembler shift semantics
  arch/tile: fix deadlock bugs in rwlock implementation
  drivers/edac: provide support for tile architecture
  tile on-chip network driver: sync up with latest fixes
  arch/tile: support 4KB page size as well as 64KB
  arch/tile: add some more VMSPLIT options and use consistent naming
  arch/tile: fix some comments and whitespace
  arch/tile: export some additional module symbols
  arch/tile: enhance existing finv_buffer_remote() routine
  arch/tile: fix two bugs in the backtracer code
  arch/tile: use extended assembly to inline __mb_incoherent()
  arch/tile: use a cleaner technique to enable interrupt for cpu_idle()
  arch/tile: sync up with <arch/sim.h> and <arch/sim_def.h> changes
  arch/tile: fix reversed test of strict_strtol() return value
  arch/tile: avoid a simulator warning during bootup
  arch/tile: export <asm/hardwall.h> to userspace
  arch/tile: warn and retry if an IPI is not accepted by the target cpu
  arch/tile: stop disabling INTCTRL_1 interrupts during hypervisor downcalls
  arch/tile: fix __ndelay etc to work better
  arch/tile: bug fix: exec'ed task thought it was still single-stepping
  ...

Fix up trivial conflict in arch/tile/kernel/vmlinux.lds.S (percpu alignment
vs section naming convention fix)
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm

 header-y += ucontext.h
+header-y += hardwall.h
@@ -32,7 +32,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return v->counter;
+	return ACCESS_ONCE(v->counter);
 }

 /**
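Reading the counter through ACCESS_ONCE() forces the compiler to perform exactly one real load and never cache or re-fetch the value across the call. At the time of this merge the kernel defines the macro as a volatile cast, so a minimal model of the change is:

/* Minimal model of ACCESS_ONCE() as defined in <linux/compiler.h> of
 * this era: casting through volatile forces a single, non-elided load.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static inline int atomic_read_sketch(const atomic_t *v)
{
	return ACCESS_ONCE(v->counter);	/* one load, never optimized away */
}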
@@ -122,7 +122,7 @@ static inline int test_and_change_bit(unsigned nr,
 	return (_atomic_xor(addr, mask) & mask) != 0;
 }

-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */
+/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
 #define smp_mb__before_clear_bit() smp_mb()
 #define smp_mb__after_clear_bit() do {} while (0)

@@ -40,7 +40,7 @@
 #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES

 /* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 /*
  * Attribute for data that is kept read/write coherent until the end of
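The extra dot follows the kernel-wide convention of separating a section's base name from its qualifier (".data..read_mostly" rather than ".data.read_mostly"), so linker-script wildcards cannot collide with compiler-generated per-symbol sections such as ".data.foo"; this is the same section-naming-convention change behind the trivial vmlinux.lds.S conflict noted in the merge message. A small illustration, with a made-up variable name:

/* A variable annotated __read_mostly lands in the ".data..read_mostly"
 * input section; the linker script then groups all such sections so
 * frequently-read, rarely-written data shares cache lines.
 */
static int sample_tunable __read_mostly = 42;	/* illustrative name only */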
@@ -138,55 +138,12 @@ static inline void finv_buffer(void *buffer, size_t size)
 }

 /*
- * Flush & invalidate a VA range that is homed remotely on a single core,
- * waiting until the memory controller holds the flushed values.
+ * Flush and invalidate a VA range that is homed remotely, waiting
+ * until the memory controller holds the flushed values.  If "hfh" is
+ * true, we will do a more expensive flush involving additional loads
+ * to make sure we have touched all the possible home cpus of a buffer
+ * that is homed with "hash for home".
  */
-static inline void finv_buffer_remote(void *buffer, size_t size)
-{
-	char *p;
-	int i;
-
-	/*
-	 * Flush and invalidate the buffer out of the local L1/L2
-	 * and request the home cache to flush and invalidate as well.
-	 */
-	__finv_buffer(buffer, size);
-
-	/*
-	 * Wait for the home cache to acknowledge that it has processed
-	 * all the flush-and-invalidate requests.  This does not mean
-	 * that the flushed data has reached the memory controller yet,
-	 * but it does mean the home cache is processing the flushes.
-	 */
-	__insn_mf();
-
-	/*
-	 * Issue a load to the last cache line, which can't complete
-	 * until all the previously-issued flushes to the same memory
-	 * controller have also completed.  If we weren't striping
-	 * memory, that one load would be sufficient, but since we may
-	 * be, we also need to back up to the last load issued to
-	 * another memory controller, which would be the point where
-	 * we crossed an 8KB boundary (the granularity of striping
-	 * across memory controllers).  Keep backing up and doing this
-	 * until we are before the beginning of the buffer, or have
-	 * hit all the controllers.
-	 */
-	for (i = 0, p = (char *)buffer + size - 1;
-	     i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
-	     ++i) {
-		const unsigned long STRIPE_WIDTH = 8192;
-
-		/* Force a load instruction to issue. */
-		*(volatile char *)p;
-
-		/* Jump to end of previous stripe. */
-		p -= STRIPE_WIDTH;
-		p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
-	}
-
-	/* Wait for the loads (and thus flushes) to have completed. */
-	__insn_mf();
-}
+void finv_buffer_remote(void *buffer, size_t size, int hfh);

 #endif /* _ASM_TILE_CACHEFLUSH_H */
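With the flush helper now out of line, callers also pass an "hfh" flag saying whether the buffer is homed "hash-for-home" (spread across many home cpus), which selects the more expensive flush described in the new comment. A usage sketch, with made-up buffer and flag names that are not from the kernel:

/* Sketch: flush a remotely-homed DMA buffer back to the memory
 * controller before handing it to a device.  "net_buf", "NET_BUF_SIZE"
 * and "buf_is_hash_for_home" are illustrative names only.
 */
finv_buffer_remote(net_buf, NET_BUF_SIZE, buf_is_hash_for_home);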
arch/tile/include/asm/edac.h (new file, 29 lines)
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_EDAC_H
+#define _ASM_TILE_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static inline void atomic_scrub(void *va, u32 size)
+{
+	/*
+	 * There is nothing to be done here because CE is
+	 * corrected by the mshim.
+	 */
+	return;
+}
+
+#endif /* _ASM_TILE_EDAC_H */
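atomic_scrub() is the per-arch hook the EDAC core calls to rewrite a region of memory after a corrected error so that fresh ECC bits are generated; on tile it is empty because the memory shim ("mshim") already corrects and rewrites the line in hardware. For contrast, a rough sketch of what a software scrub does on architectures that need one (illustrative only, not tile's or any specific arch's code):

#include <stdint.h>

/* Illustrative software scrub: atomically add 0 to each word so the
 * memory controller recomputes the ECC on write-back.  Sketch only.
 */
static inline void atomic_scrub_sketch(void *va, uint32_t size)
{
	uint32_t *p = va;
	uint32_t i;

	for (i = 0; i < size / sizeof(*p); i++)
		__sync_fetch_and_add(&p[i], 0);
}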
@@ -54,7 +54,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep, pte_t pte)
 {
-	set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER);
+	set_pte(ptep, pte);
 }

 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -18,12 +18,24 @@
 #include <arch/interrupts.h>
 #include <arch/chip.h>

+#if !defined(__tilegx__) && defined(__ASSEMBLY__)
+
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+#else
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~(INT_MASK_HI(INT_PERF_COUNT)))
+#endif
+
+#else
+
+#if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS \
 	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
@@ -32,6 +44,8 @@
 	(~(INT_MASK(INT_PERF_COUNT)))
 #endif

+#endif
+
 #ifndef __ASSEMBLY__

 /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
@@ -224,11 +238,11 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define IRQ_DISABLE(tmp0, tmp1) \
 	{ \
 	 movei  tmp0, -1; \
-	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
+	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
 	}; \
 	{ \
 	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
-	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
+	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
 	}; \
 	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1

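The moveli/auli pair materializes a 32-bit mask in two instructions: moveli loads the low 16 bits and auli adds the high 16 bits shifted up. Assuming the low half is sign-extended (which is what the "ha16", adjusted-high, naming conventionally implies), the high half must be carry-adjusted so the sum reproduces the original constant. A rough C model of that split; the exact lo16()/ha16() definitions live in the hypervisor headers and are not shown here:

#include <stdint.h>

/* Rough model of moveli (sign-extended low half) followed by auli
 * (add upper immediate).  Assumes ha16() is the carry-adjusted high
 * half; illustrative only.
 */
static uint32_t build_mask(uint32_t value)
{
	int16_t  lo = (int16_t)(value & 0xffff);                /* lo16(value) */
	uint16_t ha = (uint16_t)((value - (int32_t)lo) >> 16);  /* ha16(value) */

	uint32_t reg = (uint32_t)(int32_t)lo;   /* moveli reg, lo16(value)      */
	reg += (uint32_t)ha << 16;              /* auli   reg, reg, ha16(value) */
	return reg;                             /* reg == value                 */
}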
@@ -16,10 +16,11 @@
 #define _ASM_TILE_PAGE_H

 #include <linux/const.h>
+#include <hv/pagesize.h>

 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT	16
-#define HPAGE_SHIFT	24
+#define PAGE_SHIFT	HV_LOG2_PAGE_SIZE_SMALL
+#define HPAGE_SHIFT	HV_LOG2_PAGE_SIZE_LARGE

 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
@@ -29,25 +30,18 @@

 #ifdef __KERNEL__

+/*
+ * If the Kconfig doesn't specify, set a maximum zone order that
+ * is enough so that we can create huge pages from small pages given
+ * the respective sizes of the two page types.  See <linux/mmzone.h>.
+ */
+#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
+#endif
+
 #include <hv/hypervisor.h>
 #include <arch/chip.h>

-/*
- * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
- * definitions in <hv/hypervisor.h>.  We validate this at build time
- * here, and again at runtime during early boot.  We provide a
- * separate definition since userspace doesn't have <hv/hypervisor.h>.
- *
- * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since
- * they are the same on i386 but not TILE.
- */
-#if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT
-# error Small page size mismatch in Linux
-#endif
-#if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT
-# error Huge page size mismatch in Linux
-#endif
-
 #ifndef __ASSEMBLY__

 #include <linux/types.h>
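CONFIG_FORCE_MAX_ZONEORDER is sized so the buddy allocator can still assemble one block as large as a huge page even when small pages shrink from 64KB to 4KB. Since the largest buddy block is 2^(MAX_ORDER - 1) pages, the formula works out as below; the page sizes come from this series, and the arithmetic is just a worked check:

/* MAX_ORDER = HPAGE_SHIFT - PAGE_SHIFT + 1, so the biggest block is
 * 2^(HPAGE_SHIFT - PAGE_SHIFT) small pages, i.e. exactly one huge page.
 *
 *   64KB pages, 16MB huge pages:  24 - 16 + 1 =  9  ->  2^8  * 64KB = 16MB
 *    4KB pages, 16MB huge pages:  24 - 12 + 1 = 13  ->  2^12 *  4KB = 16MB
 */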
@@ -81,12 +75,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * Hypervisor page tables are made of the same basic structure.
  */

-typedef __u64 pteval_t;
-typedef __u64 pmdval_t;
-typedef __u64 pudval_t;
-typedef __u64 pgdval_t;
-typedef __u64 pgprotval_t;
-
 typedef HV_PTE pte_t;
 typedef HV_PTE pgd_t;
 typedef HV_PTE pgprot_t;
@@ -41,9 +41,9 @@
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_64BIT
-	set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER);
+	set_pte(pmdp, pmd);
 #else
-	set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER);
+	set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
 #endif
 }

@@ -100,6 +100,9 @@ pte_t *get_prealloc_pte(unsigned long pfn);
 /* During init, we can shatter kernel huge pages if needed. */
 void shatter_pmd(pmd_t *pmd);

+/* After init, a more complex technique is required. */
+void shatter_huge_page(unsigned long addr);
+
 #ifdef __tilegx__
 /* We share a single page allocator for both L1 and L2 page tables. */
 #if HV_L1_SIZE != HV_L2_SIZE
@@ -233,15 +233,23 @@ static inline void __pte_clear(pte_t *ptep)
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

+/* Return PA and protection info for a given kernel VA. */
+int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
+
 /*
- * set_pte_order() sets the given PTE and also sanity-checks the
+ * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
+ * the right order on 32-bit platforms and also allows us to write
+ * hooks to check valid PTEs, etc., if we want.
+ */
+void __set_pte(pte_t *ptep, pte_t pte);
+
+/*
+ * set_pte() sets the given PTE and also sanity-checks the
  * requested PTE against the page homecaching.  Unspecified parts
  * of the PTE are filled in when it is written to memory, i.e. all
  * caching attributes if "!forcecache", or the home cpu if "anyhome".
  */
-extern void set_pte_order(pte_t *ptep, pte_t pte, int order);
-
-#define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0)
+extern void set_pte(pte_t *ptep, pte_t pte);
 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

|
||||
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
|
||||
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
|
||||
|
||||
/*
|
||||
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
|
||||
*
|
||||
* dst - pointer to pgd range anwhere on a pgd page
|
||||
* src - ""
|
||||
* count - the number of pgds to copy.
|
||||
*
|
||||
* dst and src can be on the same page, but the range must not overlap,
|
||||
* and must not cross a page boundary.
|
||||
*/
|
||||
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
|
||||
{
|
||||
memcpy(dst, src, count * sizeof(pgd_t));
|
||||
}
|
||||
|
||||
/*
|
||||
* Conversion functions: convert a page and protection to a page entry,
|
||||
* and a page entry and page directory to the page they refer to.
|
||||
|
@@ -24,6 +24,7 @@
 #define PGDIR_SIZE	HV_PAGE_SIZE_LARGE
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
+#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))

 /*
  * The level-2 index is defined by the difference between the huge
@@ -33,6 +34,7 @@
  * this nomenclature is somewhat confusing.
  */
 #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
+#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))

 #ifndef __ASSEMBLY__

@@ -94,7 +96,6 @@ static inline int pgd_addr_invalid(unsigned long addr)
  */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

 extern int ptep_test_and_clear_young(struct vm_area_struct *,
                                      unsigned long addr, pte_t *);
@@ -110,6 +111,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }

+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
+}
+
 /* Create a pmd from a PTFN. */
 static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
 {
@@ -269,7 +269,6 @@ extern char chip_model[64];
 /* Data on which physical memory controller corresponds to which NUMA node. */
 extern int node_controller[];

-
 /* Do we dump information to the console when a user application crashes? */
 extern int show_crashinfo;

@@ -141,6 +141,9 @@ struct single_step_state {
 /* Single-step the instruction at regs->pc */
 extern void single_step_once(struct pt_regs *regs);

+/* Clean up after execve(). */
+extern void single_step_execve(void);
+
 struct task_struct;

 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
@@ -78,13 +78,6 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #define _RD_COUNT_SHIFT 24
 #define _RD_COUNT_WIDTH 8

-/* Internal functions; do not use. */
-void arch_read_lock_slow(arch_rwlock_t *, u32);
-int arch_read_trylock_slow(arch_rwlock_t *);
-void arch_read_unlock_slow(arch_rwlock_t *);
-void arch_write_lock_slow(arch_rwlock_t *, u32);
-void arch_write_unlock_slow(arch_rwlock_t *, u32);
-
 /**
  * arch_read_can_lock() - would read_trylock() succeed?
  */
@@ -104,94 +97,32 @@ static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
 /**
  * arch_read_lock() - acquire a read lock.
  */
-static inline void arch_read_lock(arch_rwlock_t *rwlock)
-{
-	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val << _RD_COUNT_WIDTH)) {
-		arch_read_lock_slow(rwlock, val);
-		return;
-	}
-	rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
-}
+void arch_read_lock(arch_rwlock_t *rwlock);

 /**
- * arch_read_lock() - acquire a write lock.
+ * arch_write_lock() - acquire a write lock.
  */
-static inline void arch_write_lock(arch_rwlock_t *rwlock)
-{
-	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val != 0)) {
-		arch_write_lock_slow(rwlock, val);
-		return;
-	}
-	rwlock->lock = 1 << _WR_NEXT_SHIFT;
-}
+void arch_write_lock(arch_rwlock_t *rwlock);

 /**
  * arch_read_trylock() - try to acquire a read lock.
  */
-static inline int arch_read_trylock(arch_rwlock_t *rwlock)
-{
-	int locked;
-	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val & 1))
-		return arch_read_trylock_slow(rwlock);
-	locked = (val << _RD_COUNT_WIDTH) == 0;
-	rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
-	return locked;
-}
+int arch_read_trylock(arch_rwlock_t *rwlock);

 /**
  * arch_write_trylock() - try to acquire a write lock.
  */
-static inline int arch_write_trylock(arch_rwlock_t *rwlock)
-{
-	u32 val = __insn_tns((int *)&rwlock->lock);
-
-	/*
-	 * If a tns is in progress, or there's a waiting or active locker,
-	 * or active readers, we can't take the lock, so give up.
-	 */
-	if (unlikely(val != 0)) {
-		if (!(val & 1))
-			rwlock->lock = val;
-		return 0;
-	}
-
-	/* Set the "next" field to mark it locked. */
-	rwlock->lock = 1 << _WR_NEXT_SHIFT;
-	return 1;
-}
+int arch_write_trylock(arch_rwlock_t *rwlock);

 /**
  * arch_read_unlock() - release a read lock.
  */
-static inline void arch_read_unlock(arch_rwlock_t *rwlock)
-{
-	u32 val;
-	mb();  /* guarantee anything modified under the lock is visible */
-	val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val & 1)) {
-		arch_read_unlock_slow(rwlock);
-		return;
-	}
-	rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
-}
+void arch_read_unlock(arch_rwlock_t *rwlock);

 /**
  * arch_write_unlock() - release a write lock.
  */
-static inline void arch_write_unlock(arch_rwlock_t *rwlock)
-{
-	u32 val;
-	mb();  /* guarantee anything modified under the lock is visible */
-	val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
-		arch_write_unlock_slow(rwlock, val);
-		return;
-	}
-	rwlock->lock = 0;
-}
+void arch_write_unlock(arch_rwlock_t *rwlock);

 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
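The lock-word layout the removed fast paths relied on is still visible in the constants above: the top 8 bits (_RD_COUNT_SHIFT 24, _RD_COUNT_WIDTH 8) hold the reader count, bit 0 is set while a tns (test-and-set) exchange is in flight, and the remaining low bits carry the writer ticket state. A small decoding sketch under those assumptions; the field names are illustrative, not kernel code:

#include <stdint.h>

/* Illustrative decode of the 32-bit rwlock word described by the
 * _RD_COUNT_* constants above.
 */
struct rwlock_fields {
	uint32_t readers;     /* high 8 bits: active reader count          */
	uint32_t writer_bits; /* low bits above bit 0: writer ticket state */
	uint32_t tns_busy;    /* bit 0: a tns exchange is in progress      */
};

static struct rwlock_fields rwlock_decode(uint32_t lockword)
{
	struct rwlock_fields f;

	f.readers     = lockword >> 24;
	f.tns_busy    = lockword & 1u;
	f.writer_bits = (lockword & 0x00ffffffu) >> 1;
	return f;
}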
@@ -18,13 +18,14 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <asm/backtrace.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>

 /* Everything we need to keep track of a backtrace iteration */
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;	/* task we are backtracing */
-	HV_PTE *pgtable;	/* page table for user space access */
+	pte_t *pgtable;		/* page table for user space access */
 	int end;		/* iteration complete. */
 	int new_context;	/* new context is starting */
 	int profile;		/* profiling, so stop on async intrpt */
@@ -90,7 +90,24 @@
 #endif

 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
-int __mb_incoherent(void);  /* Helper routine for mb_incoherent(). */
+#include <hv/syscall_public.h>
+/*
+ * Issue an uncacheable load to each memory controller, then
+ * wait until those loads have completed.
+ */
+static inline void __mb_incoherent(void)
+{
+	long clobber_r10;
+	asm volatile("swint2"
+		     : "=R10" (clobber_r10)
+		     : "R10" (HV_SYS_fence_incoherent)
+		     : "r0", "r1", "r2", "r3", "r4",
+		       "r5", "r6", "r7", "r8", "r9",
+		       "r11", "r12", "r13", "r14",
+		       "r15", "r16", "r17", "r18", "r19",
+		       "r20", "r21", "r22", "r23", "r24",
+		       "r25", "r26", "r27", "r28", "r29");
+}
 #endif

 /* Fence to guarantee visibility of stores to incoherent memory. */
@@ -68,6 +68,7 @@ struct thread_info {
 #else
 #define THREAD_SIZE_ORDER (0)
 #endif
+#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)

 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)
@@ -38,6 +38,9 @@ static inline cycles_t get_cycles(void)

 cycles_t get_clock_rate(void);

+/* Convert nanoseconds to core clock cycles. */
+cycles_t ns2cycles(unsigned long nsecs);
+
 /* Called at cpu initialization to set some low-level constants. */
 void setup_clock(void);

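ns2cycles() is the helper behind the "__ndelay etc" fix listed in the merge: it scales a nanosecond count by the core clock rate reported by get_clock_rate(). The conversion is a 64-bit multiply-then-divide, roughly as below; this is a sketch, not the kernel's exact implementation:

typedef unsigned long long cycles_t;

/* Sketch: cycles = ns * Hz / 1e9, done in 64 bits so typical
 * __ndelay() arguments do not overflow.
 */
static cycles_t ns2cycles_sketch(unsigned long nsecs, cycles_t clock_rate_hz)
{
	return (cycles_t)nsecs * clock_rate_hz / 1000000000ULL;
}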