Merge tag 'v4.2-rc8' into x86/mm, before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar
2015-08-25 09:59:19 +02:00
10829 changed files with 1045595 additions and 245104 deletions

View File

@@ -6,6 +6,5 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
-generic-y += scatterlist.h
generic-y += trace_clock.h
generic-y += vtime.h

View File

@@ -34,7 +34,7 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
@@ -89,5 +89,6 @@ do { \
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
#define smp_mb__before_spinlock() smp_mb()
#endif /* _ASM_POWERPC_BARRIER_H */

View File

@@ -144,7 +144,6 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
* Compare and exchange - if *p == old, set it to new,
* and return the old value of *p.
*/
-#define __HAVE_ARCH_CMPXCHG 1
static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)

View File

@@ -242,11 +242,13 @@ enum {
/* We only set the TM feature if the kernel was compiled with TM support */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-#define CPU_FTR_TM_COMP CPU_FTR_TM
-#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#define CPU_FTR_TM_COMP CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#define PPC_FEATURE2_HTM_NOSC_COMP PPC_FEATURE2_HTM_NOSC
#else
-#define CPU_FTR_TM_COMP 0
-#define PPC_FEATURE2_HTM_COMP 0
+#define CPU_FTR_TM_COMP 0
+#define PPC_FEATURE2_HTM_COMP 0
+#define PPC_FEATURE2_HTM_NOSC_COMP 0
#endif
/* We need to mark all pages as being coherent if we're SMP or we have a
@@ -366,7 +368,7 @@ enum {
CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB)
-#define CPU_FTRS_8XX (CPU_FTR_USE_TB)
+#define CPU_FTRS_8XX (CPU_FTR_USE_TB | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \

View File

@@ -31,9 +31,9 @@ extern cpumask_t threads_core_mask;
/* cpu_thread_mask_to_cores - Return a cpumask of one per core
* hit by the argument
*
- * @threads: a cpumask of threads
+ * @threads: a cpumask of online threads
*
- * This function returns a cpumask which will have one "cpu" (or thread)
+ * This function returns a cpumask which will have one online cpu's
* bit set for each core that has at least one thread set in the argument.
*
* This can typically be used for things like IPI for tlb invalidations
@@ -42,13 +42,16 @@ extern cpumask_t threads_core_mask;
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
cpumask_t tmp, res;
-int i;
+int i, cpu;
cpumask_clear(&res);
for (i = 0; i < NR_CPUS; i += threads_per_core) {
cpumask_shift_left(&tmp, &threads_core_mask, i);
-if (cpumask_intersects(threads, &tmp))
-cpumask_set_cpu(i, &res);
+if (cpumask_intersects(threads, &tmp)) {
+cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
+if (cpu < nr_cpu_ids)
+cpumask_set_cpu(cpu, &res);
+}
}
return res;
}
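As a usage sketch only (the helper below and the choice of IPI are illustrative, not part of this patch), a caller wanting one IPI per affected core could do:

/* Hypothetical caller: kick one online thread on each core that has
 * at least one thread set in 'threads'. */
static void kick_one_thread_per_core(const struct cpumask *threads)
{
	cpumask_t cores = cpu_thread_mask_to_cores(threads);
	int cpu;

	for_each_cpu(cpu, &cores)
		smp_send_reschedule(cpu);	/* any per-CPU IPI would do */
}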

View File

@@ -46,6 +46,9 @@ struct dev_archdata {
#ifdef CONFIG_FAIL_IOMMU
int fail_iommu;
#endif
#ifdef CONFIG_CXL_BASE
struct cxl_context *cxl_ctx;
#endif
};
struct pdev_archdata {

View File

@@ -12,11 +12,11 @@
#define ASM_EDAC_H
/*
* ECC atomic, DMA, SMP and interrupt safe scrub function.
- * Implements the per arch atomic_scrub() that EDAC use for software
+ * Implements the per arch edac_atomic_scrub() that EDAC use for software
* ECC scrubbing. It reads memory and then writes back the original
* value, allowing the hardware to detect and correct memory errors.
*/
-static __inline__ void atomic_scrub(void *va, u32 size)
+static __inline__ void edac_atomic_scrub(void *va, u32 size)
{
unsigned int *virt_addr = va;
unsigned int temp;

View File

@@ -27,6 +27,8 @@
#include <linux/time.h>
#include <linux/atomic.h>
#include <uapi/asm/eeh.h>
struct pci_dev;
struct pci_bus;
struct pci_dn;
@@ -185,11 +187,6 @@ enum {
#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
-#define EEH_PE_STATE_NORMAL 0 /* Normal state */
-#define EEH_PE_STATE_RESET 1 /* PE reset asserted */
-#define EEH_PE_STATE_STOPPED_IO_DMA 2 /* Frozen PE */
-#define EEH_PE_STATE_STOPPED_DMA 4 /* Stopped DMA, Enabled IO */
-#define EEH_PE_STATE_UNAVAIL 5 /* Unavailable */
#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
#define EEH_RESET_HOT 1 /* Hot reset */
#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
@@ -294,6 +291,8 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option);
int eeh_pe_get_state(struct eeh_pe *pe);
int eeh_pe_reset(struct eeh_pe *pe, int option);
int eeh_pe_configure(struct eeh_pe *pe);
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.

View File

@@ -112,11 +112,6 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -173,15 +168,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
-static inline int arch_prepare_hugepage(struct page *page)
-{
-return 0;
-}
-static inline void arch_release_hugepage(struct page *page)
-{
-}
static inline void arch_clear_hugepage_flags(struct page *page)
{
}

View File

@@ -0,0 +1,184 @@
/*
* ICSWX api
*
* Copyright (C) 2015 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This provides the Initiate Coprocessor Store Word Indexed (ICSWX)
* instruction. This instruction is used to communicate with PowerPC
* coprocessors. This also provides definitions of the structures used
* to communicate with the coprocessor.
*
* The RFC02130: Coprocessor Architecture document is the reference for
* everything in this file unless otherwise noted.
*/
#ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
#define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
#include <asm/ppc-opcode.h> /* for PPC_ICSWX */
/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
#define CCB_VALUE (0x3fffffffffffffff)
#define CCB_ADDRESS (0xfffffffffffffff8)
#define CCB_CM (0x0000000000000007)
#define CCB_CM0 (0x0000000000000004)
#define CCB_CM12 (0x0000000000000003)
#define CCB_CM0_ALL_COMPLETIONS (0x0)
#define CCB_CM0_LAST_IN_CHAIN (0x4)
#define CCB_CM12_STORE (0x0)
#define CCB_CM12_INTERRUPT (0x1)
#define CCB_SIZE (0x10)
#define CCB_ALIGN CCB_SIZE
struct coprocessor_completion_block {
__be64 value;
__be64 address;
} __packed __aligned(CCB_ALIGN);
/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
#define CSB_V (0x80)
#define CSB_F (0x04)
#define CSB_CH (0x03)
#define CSB_CE_INCOMPLETE (0x80)
#define CSB_CE_TERMINATION (0x40)
#define CSB_CE_TPBC (0x20)
#define CSB_CC_SUCCESS (0)
#define CSB_CC_INVALID_ALIGN (1)
#define CSB_CC_OPERAND_OVERLAP (2)
#define CSB_CC_DATA_LENGTH (3)
#define CSB_CC_TRANSLATION (5)
#define CSB_CC_PROTECTION (6)
#define CSB_CC_RD_EXTERNAL (7)
#define CSB_CC_INVALID_OPERAND (8)
#define CSB_CC_PRIVILEGE (9)
#define CSB_CC_INTERNAL (10)
#define CSB_CC_WR_EXTERNAL (12)
#define CSB_CC_NOSPC (13)
#define CSB_CC_EXCESSIVE_DDE (14)
#define CSB_CC_WR_TRANSLATION (15)
#define CSB_CC_WR_PROTECTION (16)
#define CSB_CC_UNKNOWN_CODE (17)
#define CSB_CC_ABORT (18)
#define CSB_CC_TRANSPORT (20)
#define CSB_CC_SEGMENTED_DDL (31)
#define CSB_CC_PROGRESS_POINT (32)
#define CSB_CC_DDE_OVERFLOW (33)
#define CSB_CC_SESSION (34)
#define CSB_CC_PROVISION (36)
#define CSB_CC_CHAIN (37)
#define CSB_CC_SEQUENCE (38)
#define CSB_CC_HW (39)
#define CSB_SIZE (0x10)
#define CSB_ALIGN CSB_SIZE
struct coprocessor_status_block {
u8 flags;
u8 cs;
u8 cc;
u8 ce;
__be32 count;
__be64 address;
} __packed __aligned(CSB_ALIGN);
/* Chapter 6.5.10 Data-Descriptor List (DDL)
* each list contains one or more Data-Descriptor Entries (DDE)
*/
#define DDE_P (0x8000)
#define DDE_SIZE (0x10)
#define DDE_ALIGN DDE_SIZE
struct data_descriptor_entry {
__be16 flags;
u8 count;
u8 index;
__be32 length;
__be64 address;
} __packed __aligned(DDE_ALIGN);
/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
#define CRB_SIZE (0x80)
#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */
/* Coprocessor Status Block field
* ADDRESS address of CSB
* C CCB is valid
* AT 0 = addrs are virtual, 1 = addrs are phys
* M enable perf monitor
*/
#define CRB_CSB_ADDRESS (0xfffffffffffffff0)
#define CRB_CSB_C (0x0000000000000008)
#define CRB_CSB_AT (0x0000000000000002)
#define CRB_CSB_M (0x0000000000000001)
struct coprocessor_request_block {
__be32 ccw;
__be32 flags;
__be64 csb_addr;
struct data_descriptor_entry source;
struct data_descriptor_entry target;
struct coprocessor_completion_block ccb;
u8 reserved[48];
struct coprocessor_status_block csb;
} __packed __aligned(CRB_ALIGN);
/* RFC02167 Initiate Coprocessor Instructions document
* Chapter 8.2.1.1.1 RS
* Chapter 8.2.3 Coprocessor Directive
* Chapter 8.2.4 Execution
*
* The CCW must be converted to BE before passing to icswx()
*/
#define CCW_PS (0xff000000)
#define CCW_CT (0x00ff0000)
#define CCW_CD (0x0000ffff)
#define CCW_CL (0x0000c000)
/* RFC02167 Initiate Coprocessor Instructions document
* Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX)
* Chapter 8.2.4.1 Condition Register 0
*/
#define ICSWX_INITIATED (0x8)
#define ICSWX_BUSY (0x4)
#define ICSWX_REJECTED (0x2)
static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
{
__be64 ccw_reg = ccw;
u32 cr;
__asm__ __volatile__(
PPC_ICSWX(%1,0,%2) "\n"
"mfcr %0\n"
: "=r" (cr)
: "r" (ccw_reg), "r" (crb)
: "cr0", "memory");
return (int)((cr >> 28) & 0xf);
}
#endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */
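For illustration, a hedged sketch of driving this API (the coprocessor-type handling, buffer setup, and retry policy below are assumptions, not taken from the patch; only icswx() and the CR0 result bits come from the header above):

/* Hypothetical submission helper: build the CCW in big-endian form as
 * required above, issue the request, and retry while the coprocessor
 * reports busy. */
static int submit_crb(struct coprocessor_request_block *crb, u32 ct)
{
	__be32 ccw = cpu_to_be32((ct << 16) & CCW_CT);	/* CT field only */
	int rc;

	do {
		rc = icswx(ccw, crb);
	} while (rc == ICSWX_BUSY);

	return rc == ICSWX_INITIATED ? 0 : -EIO;	/* e.g. ICSWX_REJECTED */
}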

View File

@@ -44,6 +44,39 @@
extern int iommu_is_off;
extern int iommu_force_on;
struct iommu_table_ops {
/*
* When called with direction==DMA_NONE, it is equal to clear().
* uaddr is a linear map address.
*/
int (*set)(struct iommu_table *tbl,
long index, long npages,
unsigned long uaddr,
enum dma_data_direction direction,
struct dma_attrs *attrs);
#ifdef CONFIG_IOMMU_API
/*
* Exchanges existing TCE with new TCE plus direction bits;
* returns old TCE and DMA direction mask.
* @tce is a physical address.
*/
int (*exchange)(struct iommu_table *tbl,
long index,
unsigned long *hpa,
enum dma_data_direction *direction);
#endif
void (*clear)(struct iommu_table *tbl,
long index, long npages);
/* get() returns a physical address */
unsigned long (*get)(struct iommu_table *tbl, long index);
void (*flush)(struct iommu_table *tbl);
void (*free)(struct iommu_table *tbl);
};
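A hedged sketch of how mapping code is expected to drive these callbacks through a table's it_ops pointer (the wrapper function and its arguments are illustrative, not part of the patch):

/* Map 'npages' starting at linear address 'uaddr'. */
static int map_pages(struct iommu_table *tbl, long index, long npages,
		     unsigned long uaddr, enum dma_data_direction dir)
{
	/* set() with a real direction programs TCEs; DMA_NONE would clear */
	return tbl->it_ops->set(tbl, index, npages, uaddr, dir, NULL);
}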
/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
* of dma space we can get. IOMAP_MAX_ORDER = 13
@@ -64,6 +97,9 @@ struct iommu_pool {
struct iommu_table {
unsigned long it_busno; /* Bus number this table belongs to */
unsigned long it_size; /* Size of iommu table in entries */
unsigned long it_indirect_levels;
unsigned long it_level_size;
unsigned long it_allocated_size;
unsigned long it_offset; /* Offset into global table */
unsigned long it_base; /* mapped address of tce table */
unsigned long it_index; /* which iommu table this is */
@@ -75,15 +111,16 @@ struct iommu_table {
struct iommu_pool pools[IOMMU_NR_POOLS];
unsigned long *it_map; /* A simple allocation bitmap for now */
unsigned long it_page_shift;/* table iommu page size */
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
void (*set_bypass)(struct iommu_table *tbl, bool enable);
#ifdef CONFIG_PPC_POWERNV
void *data;
#endif
struct list_head it_group_list;/* List of iommu_table_group_link */
unsigned long *it_userspace; /* userspace view of the table */
struct iommu_table_ops *it_ops;
};
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
((tbl)->it_userspace ? \
&((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \
NULL)
/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
@@ -112,14 +149,62 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
*/
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid);
#define IOMMU_TABLE_GROUP_MAX_TABLES 2
struct iommu_table_group;
struct iommu_table_group_ops {
unsigned long (*get_table_size)(
__u32 page_shift,
__u64 window_size,
__u32 levels);
long (*create_table)(struct iommu_table_group *table_group,
int num,
__u32 page_shift,
__u64 window_size,
__u32 levels,
struct iommu_table **ptbl);
long (*set_window)(struct iommu_table_group *table_group,
int num,
struct iommu_table *tblnew);
long (*unset_window)(struct iommu_table_group *table_group,
int num);
/* Switch ownership from platform code to external user (e.g. VFIO) */
void (*take_ownership)(struct iommu_table_group *table_group);
/* Switch ownership from external user (e.g. VFIO) back to core */
void (*release_ownership)(struct iommu_table_group *table_group);
};
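The ownership handshake these callbacks imply, as an illustrative sketch (the function name, window number, and elided error handling are assumptions):

/* An external user such as VFIO takes the group away from platform
 * code, installs its own DMA window, and later hands it back. */
static void vfio_style_attach(struct iommu_table_group *table_group,
			      struct iommu_table *tbl)
{
	table_group->ops->take_ownership(table_group);
	table_group->ops->set_window(table_group, 0, tbl);
	/* ... external user runs DMA through the window ... */
	table_group->ops->unset_window(table_group, 0);
	table_group->ops->release_ownership(table_group);
}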
struct iommu_table_group_link {
struct list_head next;
struct rcu_head rcu;
struct iommu_table_group *table_group;
};
struct iommu_table_group {
/* IOMMU properties */
__u32 tce32_start;
__u32 tce32_size;
__u64 pgsizes; /* Bitmap of supported page sizes */
__u32 max_dynamic_windows_supported;
__u32 max_levels;
struct iommu_group *group;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct iommu_table_group_ops *ops;
};
#ifdef CONFIG_IOMMU_API
-extern void iommu_register_group(struct iommu_table *tbl,
+extern void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct device *dev);
extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction);
#else
-static inline void iommu_register_group(struct iommu_table *tbl,
+static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
unsigned long pe_num)
{
@@ -140,13 +225,6 @@ static inline int __init tce_iommu_bus_notifier_init(void)
}
#endif /* !CONFIG_IOMMU_API */
-static inline void set_iommu_table_base_and_group(struct device *dev,
-void *base)
-{
-set_iommu_table_base(dev, base);
-iommu_add_device(dev);
-}
extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask,
@@ -197,20 +275,13 @@ extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
unsigned long npages);
extern int iommu_tce_put_param_check(struct iommu_table *tbl,
unsigned long ioba, unsigned long tce);
-extern int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
-unsigned long hwaddr, enum dma_data_direction direction);
-extern unsigned long iommu_clear_tce(struct iommu_table *tbl,
-unsigned long entry);
-extern int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
-unsigned long entry, unsigned long pages);
-extern int iommu_put_tce_user_mode(struct iommu_table *tbl,
-unsigned long entry, unsigned long tce);
extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
extern void iommu_release_ownership(struct iommu_table *tbl);
extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */

View File

@@ -430,7 +430,7 @@ static inline void note_hpte_modification(struct kvm *kvm,
*/
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
-return rcu_dereference_raw_notrace(kvm->memslots);
+return rcu_dereference_raw_notrace(kvm->memslots[0]);
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

View File

@@ -698,7 +698,7 @@ struct kvm_vcpu_arch {
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}

View File

@@ -182,10 +182,11 @@ extern int kvmppc_core_create_memslot(struct kvm *kvm,
unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
-struct kvm_userspace_memory_region *mem);
+const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
-struct kvm_userspace_memory_region *mem,
-const struct kvm_memory_slot *old);
+const struct kvm_userspace_memory_region *mem,
+const struct kvm_memory_slot *old,
+const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -243,10 +244,11 @@ struct kvmppc_ops {
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
struct kvm_memory_slot *memslot,
-struct kvm_userspace_memory_region *mem);
+const struct kvm_userspace_memory_region *mem);
void (*commit_memory_region)(struct kvm *kvm,
-struct kvm_userspace_memory_region *mem,
-const struct kvm_memory_slot *old);
+const struct kvm_userspace_memory_region *mem,
+const struct kvm_memory_slot *old,
+const struct kvm_memory_slot *new);
int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
unsigned long end);

View File

@@ -65,31 +65,6 @@ struct machdep_calls {
* destroyed as well */
void (*hpte_clear_all)(void);
-int (*tce_build)(struct iommu_table *tbl,
-long index,
-long npages,
-unsigned long uaddr,
-enum dma_data_direction direction,
-struct dma_attrs *attrs);
-void (*tce_free)(struct iommu_table *tbl,
-long index,
-long npages);
-unsigned long (*tce_get)(struct iommu_table *tbl,
-long index);
-void (*tce_flush)(struct iommu_table *tbl);
-/* _rm versions are for real mode use only */
-int (*tce_build_rm)(struct iommu_table *tbl,
-long index,
-long npages,
-unsigned long uaddr,
-enum dma_data_direction direction,
-struct dma_attrs *attrs);
-void (*tce_free_rm)(struct iommu_table *tbl,
-long index,
-long npages);
-void (*tce_flush_rm)(struct iommu_table *tbl);
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
@@ -131,12 +106,6 @@ struct machdep_calls {
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
-#ifdef CONFIG_PCI_MSI
-int (*setup_msi_irqs)(struct pci_dev *dev,
-int nvec, int type);
-void (*teardown_msi_irqs)(struct pci_dev *dev);
-#endif
void (*restart)(char *cmd);
void (*halt)(void);
void (*panic)(char *str);

View File

@@ -0,0 +1,28 @@
/*
* Architecture specific mm hooks
*
* Copyright (C) 2015, IBM Corporation
* Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
#define _ASM_POWERPC_MM_ARCH_HOOKS_H
static inline void arch_remap(struct mm_struct *mm,
unsigned long old_start, unsigned long old_end,
unsigned long new_start, unsigned long new_end)
{
/*
* mremap() doesn't allow moving multiple vmas so we can limit the
* check to old_start == vdso_base.
*/
if (old_start == mm->context.vdso_base)
mm->context.vdso_base = new_start;
}
#define arch_remap arch_remap
#endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */

View File

@@ -27,6 +27,19 @@
#define MI_Ks 0x80000000 /* Should not be set */
#define MI_Kp 0x40000000 /* Should always be set */
/*
* All pages' PP exec bits are set to 000, which means Execute for Supervisor
* and no Execute for User.
* Then we use the APG to say whether accesses are according to Page rules,
* "all Supervisor" rules (Exec for all) and "all User" rules (Exec for noone)
* Therefore, we define 4 APG groups. msb is _PAGE_EXEC, lsb is _PAGE_USER
* 0 (00) => Not User, no exec => 11 (all accesses performed as user)
* 1 (01) => User but no exec => 11 (all accesses performed as user)
* 2 (10) => Not User, exec => 01 (rights according to page definition)
* 3 (11) => User, exec => 00 (all accesses performed as supervisor)
*/
#define MI_APG_INIT 0xf4ffffff
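As a worked check of that value: the APG field holds sixteen 2-bit entries, one per group. Groups 0-3 above encode as 11 11 01 00, i.e. binary 11110100 = 0xf4 in the top byte, and the twelve unused groups are left at 11, giving 0xf4ffffff.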
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
* this register are used to create the TLB entry.
@@ -87,6 +100,19 @@
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
/*
* All pages' PP data bits are set to either 000 or 011, which means
* respectively RW for Supervisor and no access for User, or RO for
* Supervisor and no access for user.
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
* Therefore, we define 2 APG groups. lsb is _PAGE_USER
* 0 => No user => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor
* according to page definition)
*/
#define MD_APG_INIT 0x4fffffff
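The same 2-bit-per-group check applies here: groups 0 and 1 above encode as 01 00, i.e. 0x4 in the top nibble, and the fourteen unused groups stay at 11, giving 0x4fffffff.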
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
* this register are used to create the TLB entry.
@@ -145,7 +171,14 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
#if (PAGE_SHIFT == 12)
#define mmu_virtual_psize MMU_PAGE_4K
#elif (PAGE_SHIFT == 14)
#define mmu_virtual_psize MMU_PAGE_16K
#else
#error "Unsupported PAGE_SIZE"
#endif
#define mmu_linear_psize MMU_PAGE_8M
#endif /* _ASM_POWERPC_MMU_8XX_H_ */

View File

@@ -536,6 +536,9 @@ typedef struct {
/* for 4K PTE fragment support */
void *pte_frag;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct list_head iommu_group_mem_list;
#endif
} mm_context_t;

View File

@@ -8,7 +8,6 @@
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>
/*
@@ -16,6 +15,24 @@
*/
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(mm_context_t *ctx);
extern void mm_iommu_cleanup(mm_context_t *ctx);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
@@ -109,5 +126,27 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
#endif
}
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
static inline void arch_unmap(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
mm->context.vdso_base = 0;
}
static inline void arch_bprm_mm_init(struct mm_struct *mm,
struct vm_area_struct *vma)
{
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */

View File

@@ -153,7 +153,8 @@
#define OPAL_FLASH_READ 110
#define OPAL_FLASH_WRITE 111
#define OPAL_FLASH_ERASE 112
-#define OPAL_LAST 112
+#define OPAL_PRD_MSG 113
+#define OPAL_LAST 113
/* Device tree flags */
@@ -165,6 +166,13 @@
#define OPAL_PM_WINKLE_ENABLED 0x00040000
#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
/*
* OPAL_CONFIG_CPU_IDLE_STATE parameters
*/
#define OPAL_CONFIG_IDLE_FASTSLEEP 1
#define OPAL_CONFIG_IDLE_UNDO 0
#define OPAL_CONFIG_IDLE_APPLY 1
#ifndef __ASSEMBLY__
/* Other enums */
@@ -352,6 +360,7 @@ enum opal_msg_type {
OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
OPAL_MSG_HMI_EVT,
OPAL_MSG_DPO,
OPAL_MSG_PRD,
OPAL_MSG_TYPE_MAX,
};
@@ -674,6 +683,23 @@ typedef struct oppanel_line {
__be64 line_len;
} oppanel_line_t;
enum opal_prd_msg_type {
OPAL_PRD_MSG_TYPE_INIT = 0, /* HBRT --> OPAL */
OPAL_PRD_MSG_TYPE_FINI, /* HBRT/kernel --> OPAL */
OPAL_PRD_MSG_TYPE_ATTN, /* HBRT <-- OPAL */
OPAL_PRD_MSG_TYPE_ATTN_ACK, /* HBRT --> OPAL */
OPAL_PRD_MSG_TYPE_OCC_ERROR, /* HBRT <-- OPAL */
OPAL_PRD_MSG_TYPE_OCC_RESET, /* HBRT <-- OPAL */
};
struct opal_prd_msg_header {
uint8_t type;
uint8_t pad[1];
__be16 size;
};
struct opal_prd_msg;
/*
* SG entries
*

View File

@@ -186,6 +186,7 @@ int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);
int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t msg_len);
@@ -193,6 +194,7 @@ int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t *msg_len);
int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
struct opal_i2c_request *oreq);
int64_t opal_prd_msg(struct opal_prd_msg *msg);
int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
uint64_t size, uint64_t token);
@@ -239,6 +241,10 @@ extern int opal_elog_init(void);
extern void opal_platform_dump_init(void);
extern void opal_sys_param_init(void);
extern void opal_msglog_init(void);
extern int opal_async_comp_init(void);
extern int opal_sensor_init(void);
extern int opal_hmi_handler_init(void);
extern int opal_event_init(void);
extern int opal_machine_check(struct pt_regs *regs);
extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
@@ -250,6 +256,8 @@ extern int opal_resync_timebase(void);
extern void opal_lpc_init(void);
extern int opal_event_request(unsigned int opal_event_nr);
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size);
void opal_free_sg_list(struct opal_sg_list *sg);

View File

@@ -278,9 +278,7 @@ extern long long virt_phys_offset;
#ifndef __ASSEMBLY__
-#undef STRICT_MM_TYPECHECKS
-#ifdef STRICT_MM_TYPECHECKS
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */
/* PTE level */

View File

@@ -27,9 +27,23 @@ struct pci_controller_ops {
* allow assignment/enabling of the device. */
bool (*enable_device_hook)(struct pci_dev *);
void (*disable_device)(struct pci_dev *);
void (*release_device)(struct pci_dev *);
/* Called during PCI resource reassignment */
resource_size_t (*window_alignment)(struct pci_bus *, unsigned long type);
void (*reset_secondary_bus)(struct pci_dev *dev);
#ifdef CONFIG_PCI_MSI
int (*setup_msi_irqs)(struct pci_dev *dev,
int nvec, int type);
void (*teardown_msi_irqs)(struct pci_dev *dev);
#endif
int (*dma_set_mask)(struct pci_dev *dev, u64 dma_mask);
void (*shutdown)(struct pci_controller *);
};
/*
@@ -185,7 +199,7 @@ struct pci_dn {
struct pci_dn *parent;
struct pci_controller *phb; /* for pci devices */
struct iommu_table *iommu_table; /* for phb's or bridges */
struct iommu_table_group *table_group; /* for phb's or bridges */
struct device_node *node; /* back-pointer to the device_node */
int pci_ext_config_space; /* for pci devices */

View File

@@ -13,9 +13,9 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
#include <asm/machdep.h>
-#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
@@ -71,36 +71,6 @@ extern struct dma_map_ops *get_pci_dma_ops(void);
*/
#define PCI_DISABLE_MWI
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-enum pci_dma_burst_strategy *strat,
-unsigned long *strategy_parameter)
-{
-unsigned long cacheline_size;
-u8 byte;
-pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
-if (byte == 0)
-cacheline_size = 1024;
-else
-cacheline_size = (int) byte * 4;
-*strat = PCI_DMA_BURST_MULTIPLE;
-*strategy_parameter = cacheline_size;
-}
-#endif
#else /* 32-bit */
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-enum pci_dma_burst_strategy *strat,
-unsigned long *strategy_parameter)
-{
-*strat = PCI_DMA_BURST_INFINITY;
-*strategy_parameter = ~0UL;
-}
-#endif
#endif /* CONFIG_PPC64 */
extern int pci_domain_nr(struct pci_bus *bus);

View File

@@ -170,24 +170,6 @@ static inline unsigned long pte_update(pte_t *p,
#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
-#ifdef CONFIG_PPC_8xx
-unsigned long tmp2;
-__asm__ __volatile__("\
-1: lwarx %0,0,%4\n\
-andc %1,%0,%5\n\
-or %1,%1,%6\n\
-/* 0x200 == Extended encoding, bit 22 */ \
-/* Bit 22 has to be 1 when _PAGE_USER is unset and _PAGE_RO is set */ \
-rlwimi %1,%1,32-1,0x200\n /* get _PAGE_RO */ \
-rlwinm %3,%1,32-2,0x200\n /* get _PAGE_USER */ \
-andc %1,%1,%3\n\
-stwcx. %1,0,%4\n\
-bne- 1b"
-: "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
-: "r" (p), "r" (clr), "r" (set), "m" (*p)
-: "cc" );
-#else /* CONFIG_PPC_8xx */
__asm__ __volatile__("\
1: lwarx %0,0,%3\n\
andc %1,%0,%4\n\
@@ -198,7 +180,6 @@ static inline unsigned long pte_update(pte_t *p,
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" (clr), "r" (set), "m" (*p)
: "cc" );
-#endif /* CONFIG_PPC_8xx */
#else /* PTE_ATOMIC_UPDATES */
unsigned long old = pte_val(*p);
*p = __pte((old & ~clr) | set);

View File

@@ -118,7 +118,7 @@
*/
#ifndef __real_pte
-#ifdef STRICT_MM_TYPECHECKS
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#else
@@ -347,11 +347,27 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* Encode and de-code a swap entry */
-#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
-#define __swp_offset(entry) ((entry).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
+#define MAX_SWAPFILES_CHECK() do { \
+BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+/* \
+ * Don't have overlapping bits with _PAGE_HPTEFLAGS \
+ * We filter HPTEFLAGS on set_pte. \
+ */ \
+BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+} while (0)
+/*
+ * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
+& ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+((type) << _PAGE_BIT_SWAP_TYPE) \
+| ((offset) << PTE_RPN_SHIFT) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+#define __swp_entry_to_pte(x) __pte((x).val)
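A hedged round-trip sketch of the new encoding (the demo function is hypothetical; only the macros above come from the patch):

/* Encode a (type, offset) pair and check both fields decode intact;
 * the type must fit in SWP_TYPE_BITS (5 bits). */
static void swp_encoding_demo(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
}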
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
@@ -553,13 +569,9 @@ extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-unsigned long addr, pmd_t *pmdp);
-#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
-pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
@@ -576,6 +588,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);

View File

@@ -11,7 +11,7 @@
#define _ASM_PNV_PCI_H
#include <linux/pci.h>
#include <misc/cxl.h>
#include <misc/cxl-base.h>
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,

View File

@@ -136,6 +136,8 @@
#define PPC_INST_DCBAL 0x7c2005ec
#define PPC_INST_DCBZL 0x7c2007ec
#define PPC_INST_ICBT 0x7c00002c
#define PPC_INST_ICSWX 0x7c00032d
#define PPC_INST_ICSWEPX 0x7c00076d
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
@@ -403,4 +405,15 @@
#define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \
TMRN(tmr) | ___PPC_RT(r))
/* Coprocessor instructions */
#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \
___PPC_RS(s) | \
___PPC_RA(a) | \
___PPC_RB(b))
#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \
___PPC_RS(s) | \
___PPC_RA(a) | \
___PPC_RB(b))
#endif /* _ASM_POWERPC_PPC_OPCODE_H */

View File

@@ -295,6 +295,15 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC64
unsigned long dscr;
/*
* The dscr_inherit flag indicates that the process has explicitly
* changed the DSCR register value for itself. Hence the kernel won't
* use the default CPU DSCR value contained in the PACA structure for
* this process during context switches anymore. Once this flag is
* set, the behaviour is also inherited by all children of this
* process from that point onwards.
*/
int dscr_inherit;
unsigned long ppr; /* used to save/restore SMT priority */
#endif

View File

@@ -34,35 +34,32 @@
#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
#define _PAGE_DIRTY 0x0100 /* C: page changed */
-/* These 4 software bits must be masked out when the entry is loaded
- * into the TLB, 1 SW bit left(0x0080).
+/* These 4 software bits must be masked out when the L2 entry is loaded
+ * into the TLB.
*/
-#define _PAGE_GUARDED 0x0010 /* software: guarded access */
-#define _PAGE_ACCESSED 0x0020 /* software: page referenced */
-#define _PAGE_WRITETHRU 0x0040 /* software: caching is write through */
+#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
+#define _PAGE_USER 0x0020 /* Copied to L1 APG lsb */
+#define _PAGE_EXEC 0x0040 /* Copied to L1 APG */
+#define _PAGE_WRITETHRU 0x0080 /* software: caching is write through */
+#define _PAGE_ACCESSED 0x0800 /* software: page referenced */
/* Setting any bits in the nibble with the following two controls will
* require a TLB exception handler change. It is assumed unused bits
* are always zero.
*/
-#define _PAGE_RO 0x0400 /* lsb PP bits */
-#define _PAGE_USER 0x0800 /* msb PP bits */
-/* set when _PAGE_USER is unset and _PAGE_RO is set */
-#define _PAGE_KNLRO 0x0200
+#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
#define _PMD_PRESENT 0x0001
#define _PMD_BAD 0x0ff0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
#define _PTE_NONE_MASK _PAGE_KNLRO
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO | _PAGE_KNLRO)
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO | _PAGE_KNLRO)
+#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO)
+#define _PAGE_KERNEL_ROX (_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RW (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
_PAGE_HWWRITE)
#define _PAGE_KERNEL_RWX (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
_PAGE_HWWRITE | _PAGE_EXEC)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PTE_8xx_H */

View File

@@ -11,6 +11,7 @@
/* Architected bits */
#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
#define _PAGE_SW1 0x000002
#define _PAGE_BIT_SWAP_TYPE 2
#define _PAGE_BAP_SR 0x000004
#define _PAGE_BAP_UR 0x000008
#define _PAGE_BAP_SW 0x000010

View File

@@ -85,10 +85,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
* 64-bit PTEs
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
-#define PTE_RPN_MAX (1ULL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1))
#else
-#define PTE_RPN_MAX (1UL << (32 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
#endif

View File

@@ -16,6 +16,7 @@
*/
#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
#define _PAGE_USER 0x0002 /* matches one of the PP bits */
#define _PAGE_BIT_SWAP_TYPE 2
#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED 0x0008
/* We can derive Memory coherence from _PAGE_NO_CACHE */

View File

@@ -358,7 +358,7 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
-SYSCALL(ni_syscall) /* sys_kcmp */
+SYSCALL(kcmp) /* sys_kcmp */
SYSCALL_SPU(sched_setattr)
SYSCALL_SPU(sched_getattr)
SYSCALL_SPU(renameat2)

View File

@@ -87,7 +87,7 @@ static inline int prrn_is_enabled(void)
#include <asm/smp.h>
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
-#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif

View File

@@ -144,6 +144,26 @@ TRACE_EVENT_FN(opal_exit,
);
#endif
TRACE_EVENT(hash_fault,
TP_PROTO(unsigned long addr, unsigned long access, unsigned long trap),
TP_ARGS(addr, access, trap),
TP_STRUCT__entry(
__field(unsigned long, addr)
__field(unsigned long, access)
__field(unsigned long, trap)
),
TP_fast_assign(
__entry->addr = addr;
__entry->access = access;
__entry->trap = trap;
),
TP_printk("hash fault with addr 0x%lx and access = 0x%lx trap = 0x%lx",
__entry->addr, __entry->access, __entry->trap)
);
#endif /* _TRACE_POWERPC_H */
#undef TRACE_INCLUDE_PATH
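With the event defined, a hedged sketch of a call site (the surrounding function and argument names are illustrative; trace_hash_fault() is the helper TRACE_EVENT generates):

/* Emit the tracepoint from a hypothetical hash-fault path. */
static void report_hash_fault(unsigned long ea, unsigned long access,
			      unsigned long trap)
{
	trace_hash_fault(ea, access, trap);
}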

View File

@@ -265,7 +265,7 @@ do { \
({ \
long __gu_err; \
unsigned long __gu_val; \
-const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
@@ -279,7 +279,7 @@ do { \
({ \
long __gu_err; \
long long __gu_val; \
-const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
@@ -293,7 +293,7 @@ do { \
({ \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
-const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -305,7 +305,7 @@ do { \
({ \
long __gu_err; \
unsigned long __gu_val; \
-const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \

View File

@@ -19,9 +19,9 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
+#include <linux/scatterlist.h>
#include <asm/hvcall.h>
-#include <asm/scatterlist.h>
/*
* Architecture-specific constants for drivers to