Merge branch 'origin' into for-linus

Conflicts:
	MAINTAINERS
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                 __cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                         vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = user_addr & PAGE_MASK;
                 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
         }
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                     unsigned long uaddr, void *kaddr,
                     unsigned long len, int write)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = (unsigned long)kaddr;
                 __cpuc_coherent_kern_range(addr, addr + len);
         }
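For context: these cacheflush helpers are being converted from the old cpu_isset()/mm->cpu_vm_mask idiom to cpumask_test_cpu()/mm_cpumask(), which hands the helpers a pointer to the mm's CPU bitmap instead of poking the struct member directly. A minimal userspace sketch of the bit-test semantics behind that call follows; the names my_mask_t, my_test_cpu(), my_set_cpu() and my_clear_cpu() are invented for illustration and are not kernel APIs.

    /* Toy model of the cpumask bit test used above; not kernel code. */
    #include <stdio.h>

    typedef struct { unsigned long bits; } my_mask_t;   /* kernel uses struct cpumask */

    static int my_test_cpu(int cpu, const my_mask_t *m)
    {
        return (m->bits >> cpu) & 1UL;
    }

    static void my_set_cpu(int cpu, my_mask_t *m)   { m->bits |= 1UL << cpu; }
    static void my_clear_cpu(int cpu, my_mask_t *m) { m->bits &= ~(1UL << cpu); }

    int main(void)
    {
        my_mask_t mm_cpus = { 0 };

        my_set_cpu(2, &mm_cpus);              /* the mm has run on CPU 2 */
        if (my_test_cpu(2, &mm_cpus))         /* analogous to cpumask_test_cpu() */
            printf("flush needed on this CPU\n");
        my_clear_cpu(2, &mm_cpus);            /* analogous to cpumask_clear_cpu() */
        return 0;
    }

The real cpumask helpers work on arbitrarily sized bitmaps and use atomic bit operations where required; the sketch only models the logical behaviour.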
@@ -187,11 +187,74 @@ union iop3xx_desc {
         void *ptr;
 };
 
+/* No support for p+q operations */
+static inline int
+iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+        BUG();
+        return 0;
+}
+
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+                 unsigned long flags)
+{
+        BUG();
+}
+
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+        BUG();
+}
+
+static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+                         dma_addr_t addr, unsigned char coef)
+{
+        BUG();
+}
+
+static inline int
+iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+        BUG();
+        return 0;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+                          unsigned long flags)
+{
+        BUG();
+}
+
+static inline void
+iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+        BUG();
+}
+
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+                              dma_addr_t *src)
+{
+        BUG();
+}
+
 static inline int iop_adma_get_max_xor(void)
 {
         return 32;
 }
 
+static inline int iop_adma_get_max_pq(void)
+{
+        BUG();
+        return 0;
+}
+
 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
 {
         int id = chan->device->id;
@@ -332,6 +395,11 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
         return slot_cnt;
 }
 
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+        return 0;
+}
+
 static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
                                          struct iop_adma_chan *chan)
 {
@@ -349,6 +417,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
         return 0;
 }
 
+
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+                                          struct iop_adma_chan *chan)
+{
+        BUG();
+        return 0;
+}
+
 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
                                           struct iop_adma_chan *chan)
 {
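For context: the iop3xx AAU has no P+Q (RAID-6 syndrome) engine, so these hooks are added as static inline stubs that BUG() if ever reached, which lets the shared iop-adma driver compile against this header while the capability stays unreachable at runtime. A small userspace sketch of that stub pattern follows; backend_max_pq(), backend_has_pq() and TRAP() are invented names, only the shape of the pattern matches.

    /* Illustrative sketch of the "stub unsupported ops with a trap" pattern. */
    #include <stdio.h>
    #include <stdlib.h>

    #define TRAP() do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)

    /* Backend without P+Q support: a stub that must never be reached. */
    static inline int backend_max_pq(void)
    {
        TRAP();                       /* the kernel header uses BUG() here */
        return 0;
    }

    static inline int backend_has_pq(void) { return 0; }

    int main(void)
    {
        /* Callers check the capability first, so the trapping stub is
         * never executed on a backend that lacks the feature. */
        if (backend_has_pq())
            printf("max pq sources: %d\n", backend_max_pq());
        else
            printf("pq not supported on this backend\n");
        return 0;
    }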
@@ -756,13 +832,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
         hw_desc->src[0] = val;
 }
 
-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 {
         struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
         struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
 
         iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
-        return desc_ctrl.zero_result_err;
+        return desc_ctrl.zero_result_err << SUM_CHECK_P;
 }
 
 static inline void iop_chan_append(struct iop_adma_chan *chan)
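For context: iop_desc_get_zero_result() now returns enum sum_check_flags rather than a bare int, and the hardware's zero_result_err bit is shifted into the SUM_CHECK_P position so xor-validate results use the same bitmask layout as P+Q validate results. The sketch below shows how such a flags layout is consumed; the enum values mirror the dmaengine definitions of that era as best remembered, so treat them as illustrative rather than authoritative.

    /* Rough sketch of why the result is shifted by SUM_CHECK_P: the check
     * result is a small bitmask with one bit per validated quantity. */
    #include <stdio.h>

    enum sum_check_bits  { SUM_CHECK_P = 0, SUM_CHECK_Q = 1 };
    enum sum_check_flags {
        SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),   /* xor/P parity mismatched */
        SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),   /* RAID-6 Q syndrome mismatched */
    };

    int main(void)
    {
        unsigned zero_result_err = 1;              /* hardware says "not zero" */
        enum sum_check_flags res = zero_result_err << SUM_CHECK_P;

        if (res & SUM_CHECK_P_RESULT)
            printf("P (xor) check failed\n");
        if (res & SUM_CHECK_Q_RESULT)
            printf("Q check failed\n");
        return 0;
    }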
@@ -86,6 +86,7 @@ struct iop_adma_chan {
  * @idx: pool index
  * @unmap_src_cnt: number of xor sources
  * @unmap_len: transaction bytecount
+ * @tx_list: list of descriptors that are associated with one operation
  * @async_tx: support for the async_tx api
  * @group_list: list of slots that make up a multi-descriptor transaction
  *      for example transfer lengths larger than the supported hw max
@@ -102,10 +103,12 @@ struct iop_adma_desc_slot {
         u16 idx;
         u16 unmap_src_cnt;
         size_t unmap_len;
+        struct list_head tx_list;
         struct dma_async_tx_descriptor async_tx;
         union {
                 u32 *xor_check_result;
                 u32 *crc32_result;
+                u32 *pq_check_result;
         };
 };
 
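For context: the descriptor slot grows a tx_list list_head (plus its kernel-doc line) so the descriptors belonging to one operation can be chained together, and the result union gains a pq_check_result pointer. If the intrusive list_head idea is unfamiliar, here is a minimal userspace imitation; struct list_node, struct desc_slot and the helpers are invented, only the technique matches the kernel's list_head.

    #include <stdio.h>
    #include <stddef.h>

    struct list_node { struct list_node *next, *prev; };

    #define LIST_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add_tail(struct list_node *n, struct list_node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    struct desc_slot {
        int idx;
        struct list_node tx_list;     /* descriptor chains hang off this */
    };

    int main(void)
    {
        struct list_node chain = LIST_INIT(chain);
        struct desc_slot a = { .idx = 0 }, b = { .idx = 1 };

        list_add_tail(&a.tx_list, &chain);
        list_add_tail(&b.tx_list, &chain);

        for (struct list_node *p = chain.next; p != &chain; p = p->next)
            printf("slot %d\n", container_of(p, struct desc_slot, tx_list)->idx);
        return 0;
    }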
@@ -1,17 +1 @@
-#ifndef __ARM_MMAN_H__
-#define __ARM_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN   0x0100          /* stack-like segment */
-#define MAP_DENYWRITE   0x0800          /* ETXTBSY */
-#define MAP_EXECUTABLE  0x1000          /* mark it as an executable */
-#define MAP_LOCKED      0x2000          /* pages are locked */
-#define MAP_NORESERVE   0x4000          /* don't check for reservations */
-#define MAP_POPULATE    0x8000          /* populate (prefault) page tables */
-#define MAP_NONBLOCK    0x10000         /* do not block on IO */
-
-#define MCL_CURRENT     1               /* lock all current mappings */
-#define MCL_FUTURE      2               /* lock all future mappings */
-
-#endif /* __ARM_MMAN_H__ */
+#include <asm-generic/mman.h>
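For context: ARM's MAP_* and MCL_* values match the asm-generic ones, so the whole header collapses to a single include of <asm-generic/mman.h> with no change in the user-visible ABI. A plain userspace example using one of the flags involved, MAP_POPULATE, behaves identically before and after this change (minimal error handling, standard mmap(2) usage):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 1 << 20;   /* 1 MiB */

        /* MAP_POPULATE prefaults the pages so the first touch takes no fault. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        memset(p, 0xaa, len);
        munmap(p, len);
        return 0;
    }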
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
         /* check for possible thread migration */
-        if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+        if (!cpumask_empty(mm_cpumask(next)) &&
+            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                 __flush_icache_all();
 #endif
-        if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                 check_context(next);
                 cpu_switch_mm(next->pgd, next);
                 if (cache_is_vivt())
-                        cpu_clear(cpu, prev->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
         }
 #endif
 }
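For context: switch_mm() now uses cpumask_test_and_set_cpu(), which atomically marks the mm as having run on this CPU and reports whether the bit was already set; a clear bit (first run here) forces the context check and page-table switch even when prev == next, and on VIVT caches the CPU drops itself from the old mm's mask. A toy sketch of that test-and-set idiom using C11 atomics follows; the names are invented, and the kernel operates on a real per-mm CPU bitmap rather than a single word.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong mm_cpus;   /* stand-in for mm_cpumask(next) */

    static int test_and_set_cpu(int cpu, atomic_ulong *mask)
    {
        unsigned long bit = 1UL << cpu;
        unsigned long old = atomic_fetch_or(mask, bit);
        return (old & bit) != 0;   /* 1 if the CPU was already in the mask */
    }

    int main(void)
    {
        int cpu = 1;

        if (!test_and_set_cpu(cpu, &mm_cpus))
            printf("first run of this mm on CPU %d -> switch page tables\n", cpu);

        if (test_and_set_cpu(cpu, &mm_cpus))
            printf("CPU %d already in the mask -> no extra work needed\n", cpu);
        return 0;
    }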
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * show local interrupt info
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
         if (tlb_flag(TLB_WB))
                 dsb();
 
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                 if (tlb_flag(TLB_V3_FULL))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
                 if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         if (tlb_flag(TLB_WB))
                 dsb();
 
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 if (tlb_flag(TLB_V3_PAGE))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
                 if (tlb_flag(TLB_V4_U_PAGE))
@@ -390,7 +390,7 @@
 #define __NR_preadv                     (__NR_SYSCALL_BASE+361)
 #define __NR_pwritev                    (__NR_SYSCALL_BASE+362)
 #define __NR_rt_tgsigqueueinfo          (__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open          (__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open            (__NR_SYSCALL_BASE+364)
 
 /*
  * The following SWIs are ARM private.
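For context: this is the tree-wide perf_counter to perf_event rename; the syscall keeps number __NR_SYSCALL_BASE+364 on ARM, only its name changes. Typical userspace usage of the syscall, along the lines of the perf_event_open(2) man page (glibc provides no wrapper, so syscall(2) is used), looks like this:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* pid = 0, cpu = -1: count this process on any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
            ;
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("instructions: %lld\n", count);
        close(fd);
        return 0;
    }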