Merge branches 'arnd-fixes', 'clk', 'misc', 'v7' and 'fixes' into for-next
arch/arm/mm/Kconfig

@@ -604,6 +604,22 @@ config CPU_USE_DOMAINS
 	  This option enables or disables the use of domain switching
 	  via the set_fs() function.
 
+config CPU_V7M_NUM_IRQ
+	int "Number of external interrupts connected to the NVIC"
+	depends on CPU_V7M
+	default 90 if ARCH_STM32
+	default 38 if ARCH_EFM32
+	default 112 if SOC_VF610
+	default 240
+	help
+	  This option indicates the number of interrupts connected to the NVIC.
+	  The value can be larger than the real number of interrupts supported
+	  by the system, but must not be lower.
+	  The default value is 240, corresponding to the maximum number of
+	  interrupts supported by the NVIC on Cortex-M family.
+
+	  If unsure, keep default value.
+
 #
 # CPU supports 36-bit I/O
 #
@@ -624,6 +640,10 @@ config ARM_LPAE
 
 	  If unsure, say N.
 
+config ARM_PV_FIXUP
+	def_bool y
+	depends on ARM_LPAE && ARM_PATCH_PHYS_VIRT && ARCH_KEYSTONE
+
 config ARCH_PHYS_ADDR_T_64BIT
 	def_bool ARM_LPAE
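For context, CPU_V7M_NUM_IRQ is a compile-time constant, so NVIC support code can size static data with it; oversizing only wastes table space, while undersizing would drop interrupts, hence the "must not be lower" rule. A minimal standalone sketch of that idea follows; the fallback define and the nvic_prio name are illustrative, not taken from this commit.

/*
 * Illustrative sketch only (not from this commit): a table sized by
 * CONFIG_CPU_V7M_NUM_IRQ.
 */
#include <stdio.h>

#ifndef CONFIG_CPU_V7M_NUM_IRQ
#define CONFIG_CPU_V7M_NUM_IRQ 240	/* NVIC architectural maximum */
#endif

static unsigned char nvic_prio[CONFIG_CPU_V7M_NUM_IRQ];

int main(void)
{
	printf("NVIC table sized for %d interrupts (%zu bytes)\n",
	       CONFIG_CPU_V7M_NUM_IRQ, sizeof(nvic_prio));
	return 0;
}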
arch/arm/mm/Makefile

@@ -18,6 +18,7 @@ obj-$(CONFIG_MODULES)		+= proc-syms.o
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+obj-$(CONFIG_ARM_PV_FIXUP)	+= pv-fixup-asm.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
arch/arm/mm/cache-l2x0.c

@@ -38,10 +38,11 @@ struct l2c_init_data {
 	unsigned way_size_0;
 	unsigned num_lock;
 	void (*of_parse)(const struct device_node *, u32 *, u32 *);
-	void (*enable)(void __iomem *, u32, unsigned);
+	void (*enable)(void __iomem *, unsigned);
 	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
 	void (*save)(void __iomem *);
 	void (*configure)(void __iomem *);
+	void (*unlock)(void __iomem *, unsigned);
 	struct outer_cache_fns outer_cache;
 };
 
@@ -110,14 +111,6 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
 
 static void l2c_configure(void __iomem *base)
 {
-	if (outer_cache.configure) {
-		outer_cache.configure(&l2x0_saved_regs);
-		return;
-	}
-
-	if (l2x0_data->configure)
-		l2x0_data->configure(base);
-
 	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
 }
 
@@ -125,18 +118,16 @@ static void l2c_configure(void __iomem *base)
  * Enable the L2 cache controller.  This function must only be
  * called when the cache controller is known to be disabled.
  */
-static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
+static void l2c_enable(void __iomem *base, unsigned num_lock)
 {
 	unsigned long flags;
 
-	/* Do not touch the controller if already enabled. */
-	if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
-		return;
+	if (outer_cache.configure)
+		outer_cache.configure(&l2x0_saved_regs);
+	else
+		l2x0_data->configure(base);
 
-	l2x0_saved_regs.aux_ctrl = aux;
-	l2c_configure(base);
-
-	l2c_unlock(base, num_lock);
+	l2x0_data->unlock(base, num_lock);
 
 	local_irq_save(flags);
 	__l2c_op_way(base + L2X0_INV_WAY);
@@ -163,7 +154,11 @@ static void l2c_save(void __iomem *base)
 
 static void l2c_resume(void)
 {
-	l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
+	void __iomem *base = l2x0_base;
+
+	/* Do not touch the controller if already enabled. */
+	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
+		l2c_enable(base, l2x0_data->num_lock);
 }
 
 /*
@@ -252,6 +247,8 @@ static const struct l2c_init_data l2c210_data __initconst = {
 	.num_lock = 1,
 	.enable = l2c_enable,
 	.save = l2c_save,
+	.configure = l2c_configure,
+	.unlock = l2c_unlock,
 	.outer_cache = {
 		.inv_range = l2c210_inv_range,
 		.clean_range = l2c210_clean_range,
@@ -391,16 +388,22 @@ static void l2c220_sync(void)
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
+static void l2c220_enable(void __iomem *base, unsigned num_lock)
 {
 	/*
 	 * Always enable non-secure access to the lockdown registers -
 	 * we write to them as part of the L2C enable sequence so they
 	 * need to be accessible.
 	 */
-	aux |= L220_AUX_CTRL_NS_LOCKDOWN;
+	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
 
-	l2c_enable(base, aux, num_lock);
+	l2c_enable(base, num_lock);
+}
+
+static void l2c220_unlock(void __iomem *base, unsigned num_lock)
+{
+	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
+		l2c_unlock(base, num_lock);
 }
 
 static const struct l2c_init_data l2c220_data = {
@@ -409,6 +412,8 @@ static const struct l2c_init_data l2c220_data = {
 	.num_lock = 1,
 	.enable = l2c220_enable,
 	.save = l2c_save,
+	.configure = l2c_configure,
+	.unlock = l2c220_unlock,
 	.outer_cache = {
 		.inv_range = l2c220_inv_range,
 		.clean_range = l2c220_clean_range,
@@ -569,6 +574,8 @@ static void l2c310_configure(void __iomem *base)
 {
 	unsigned revision;
 
+	l2c_configure(base);
+
 	/* restore pl310 setup */
 	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
 		      L310_TAG_LATENCY_CTRL);
@@ -603,10 +610,11 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
 	return NOTIFY_OK;
 }
 
-static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
+static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
 {
 	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
 	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
+	u32 aux = l2x0_saved_regs.aux_ctrl;
 
 	if (rev >= L310_CACHE_ID_RTL_R2P0) {
 		if (cortex_a9) {
@@ -649,9 +657,9 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 	 * we write to them as part of the L2C enable sequence so they
 	 * need to be accessible.
 	 */
-	aux |= L310_AUX_CTRL_NS_LOCKDOWN;
+	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
 
-	l2c_enable(base, aux, num_lock);
+	l2c_enable(base, num_lock);
 
 	/* Read back resulting AUX_CTRL value as it could have been altered. */
 	aux = readl_relaxed(base + L2X0_AUX_CTRL);
@@ -755,6 +763,12 @@ static void l2c310_resume(void)
 		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
 }
 
+static void l2c310_unlock(void __iomem *base, unsigned num_lock)
+{
+	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
+		l2c_unlock(base, num_lock);
+}
+
 static const struct l2c_init_data l2c310_init_fns __initconst = {
 	.type = "L2C-310",
 	.way_size_0 = SZ_8K,
@@ -763,6 +777,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
 	.fixup = l2c310_fixup,
 	.save = l2c310_save,
 	.configure = l2c310_configure,
+	.unlock = l2c310_unlock,
 	.outer_cache = {
 		.inv_range = l2c210_inv_range,
 		.clean_range = l2c210_clean_range,
@@ -856,8 +871,11 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 	 * Check if l2x0 controller is already enabled. If we are booting
 	 * in non-secure mode accessing the below registers will fault.
 	 */
-	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
-		data->enable(l2x0_base, aux, data->num_lock);
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+		l2x0_saved_regs.aux_ctrl = aux;
+
+		data->enable(l2x0_base, data->num_lock);
+	}
 
 	outer_cache = fns;
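The net effect of the hunks above is easiest to see as a call sequence: the caller stashes the AUX_CTRL value in l2x0_saved_regs before calling enable(), and enable() then derives everything from saved state via the per-variant configure and unlock methods instead of taking aux as a parameter. A standalone model of that ordering, with all hardware access stubbed out (nothing below is kernel code):

#include <stdio.h>

static struct { unsigned aux_ctrl; } l2x0_saved_regs;

/* stand-in for the per-variant configure method */
static void configure(void)
{
	printf("AUX_CTRL <- %#x\n", l2x0_saved_regs.aux_ctrl);
}

/* stand-in for the per-variant unlock method (may be a no-op variant) */
static void unlock(unsigned num_lock)
{
	printf("unlock %u lockdown register sets\n", num_lock);
}

/* mirrors the new l2c_enable(): configure from saved state, then unlock */
static void enable(unsigned num_lock)
{
	configure();
	unlock(num_lock);
}

int main(void)
{
	unsigned aux = 0x02020000;	/* arbitrary example value */

	/* what __l2c_init() now does before calling data->enable() */
	l2x0_saved_regs.aux_ctrl = aux;
	enable(1);
	return 0;
}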
@@ -1066,6 +1084,8 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
 	.of_parse = l2x0_of_parse,
 	.enable = l2c_enable,
 	.save = l2c_save,
+	.configure = l2c_configure,
+	.unlock = l2c_unlock,
 	.outer_cache = {
 		.inv_range = l2c210_inv_range,
 		.clean_range = l2c210_clean_range,
@@ -1084,6 +1104,8 @@ static const struct l2c_init_data of_l2c220_data __initconst = {
 	.of_parse = l2x0_of_parse,
 	.enable = l2c220_enable,
 	.save = l2c_save,
+	.configure = l2c_configure,
+	.unlock = l2c220_unlock,
 	.outer_cache = {
 		.inv_range = l2c220_inv_range,
 		.clean_range = l2c220_clean_range,
@@ -1199,6 +1221,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
 	}
 
+	ret = of_property_read_u32(np, "prefetch-data", &val);
+	if (ret == 0) {
+		if (val)
+			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
+		else
+			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+	} else if (ret != -EINVAL) {
+		pr_err("L2C-310 OF prefetch-data property value is missing\n");
+	}
+
+	ret = of_property_read_u32(np, "prefetch-instr", &val);
+	if (ret == 0) {
+		if (val)
+			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
+		else
+			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+	} else if (ret != -EINVAL) {
+		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
+	}
+
 	l2x0_saved_regs.prefetch_ctrl = prefetch;
 }
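The two added property reads rely on of_property_read_u32()'s return convention: 0 means a value was read, -EINVAL means the property is simply absent (silently ignored so the hardware default stays in force), and any other error means the property exists but is unusable. A standalone model of that tri-state pattern (the helper name and bit values are illustrative only):

#include <errno.h>
#include <stdio.h>

static void apply_bool_prop(int ret, unsigned val, unsigned *ctrl,
			    unsigned bit, const char *name)
{
	if (ret == 0) {			/* property present with a value */
		if (val)
			*ctrl |= bit;
		else
			*ctrl &= ~bit;
	} else if (ret != -EINVAL) {	/* present but malformed */
		fprintf(stderr, "%s property value is missing\n", name);
	}				/* -EINVAL: absent, keep default */
}

int main(void)
{
	unsigned prefetch = 0;

	apply_bool_prop(0, 1, &prefetch, 1u << 28, "prefetch-data");
	apply_bool_prop(-EINVAL, 0, &prefetch, 1u << 29, "prefetch-instr");
	printf("prefetch ctrl = %#x\n", prefetch);
	return 0;
}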
@@ -1211,6 +1253,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
 	.fixup = l2c310_fixup,
 	.save = l2c310_save,
 	.configure = l2c310_configure,
+	.unlock = l2c310_unlock,
 	.outer_cache = {
 		.inv_range = l2c210_inv_range,
 		.clean_range = l2c210_clean_range,
@@ -1240,6 +1283,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
 	.fixup = l2c310_fixup,
 	.save = l2c310_save,
 	.configure = l2c310_configure,
+	.unlock = l2c310_unlock,
 	.outer_cache = {
 		.inv_range = l2c210_inv_range,
 		.clean_range = l2c210_clean_range,
@@ -1366,7 +1410,7 @@ static void aurora_save(void __iomem *base)
  * For Aurora cache in no outer mode, enable via the CP15 coprocessor
  * broadcasting of cache commands to L2.
  */
-static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
+static void __init aurora_enable_no_outer(void __iomem *base,
 					  unsigned num_lock)
 {
 	u32 u;
@@ -1377,7 +1421,7 @@ static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
 
 	isb();
 
-	l2c_enable(base, aux, num_lock);
+	l2c_enable(base, num_lock);
 }
 
 static void __init aurora_fixup(void __iomem *base, u32 cache_id,
@@ -1416,6 +1460,8 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
 	.enable = l2c_enable,
 	.fixup = aurora_fixup,
 	.save = aurora_save,
+	.configure = l2c_configure,
+	.unlock = l2c_unlock,
 	.outer_cache = {
 		.inv_range = aurora_inv_range,
 		.clean_range = aurora_clean_range,
@@ -1435,6 +1481,8 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
 	.enable = aurora_enable_no_outer,
 	.fixup = aurora_fixup,
 	.save = aurora_save,
+	.configure = l2c_configure,
+	.unlock = l2c_unlock,
 	.outer_cache = {
 		.resume = l2c_resume,
 	},
@@ -1585,6 +1633,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
 	.enable = l2c310_enable,
 	.save = l2c310_save,
 	.configure = l2c310_configure,
+	.unlock = l2c310_unlock,
 	.outer_cache = {
 		.inv_range = bcm_inv_range,
 		.clean_range = bcm_clean_range,
@@ -1608,6 +1657,7 @@ static void __init tauros3_save(void __iomem *base)
 
 static void tauros3_configure(void __iomem *base)
 {
+	l2c_configure(base);
 	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
 		       base + TAUROS3_AUX2_CTRL);
 	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
@@ -1621,6 +1671,7 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
 	.enable = l2c_enable,
 	.save = tauros3_save,
 	.configure = tauros3_configure,
+	.unlock = l2c_unlock,
 	/* Tauros3 broadcasts L1 cache operations to L2 */
 	.outer_cache = {
 		.resume = l2c_resume,
arch/arm/mm/dma-mapping.c

@@ -148,11 +148,14 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
 				  dma_addr_t handle, struct dma_attrs *attrs);
+static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 struct dma_attrs *attrs);
 
 struct dma_map_ops arm_coherent_dma_ops = {
 	.alloc			= arm_coherent_dma_alloc,
 	.free			= arm_coherent_dma_free,
-	.mmap			= arm_dma_mmap,
+	.mmap			= arm_coherent_dma_mmap,
 	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= arm_coherent_dma_map_page,
 	.map_sg			= arm_dma_map_sg,
@@ -690,10 +693,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 			   attrs, __builtin_return_address(0));
 }
 
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		 struct dma_attrs *attrs)
 {
@@ -704,8 +704,6 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
 	unsigned long off = vma->vm_pgoff;
 
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
@@ -720,6 +718,26 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 struct dma_attrs *attrs)
+{
+	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+#endif	/* CONFIG_MMU */
+	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
 /*
  * Free a buffer as defined by the above mapping.
  */
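The mmap rework factors the common pfn-remapping into __arm_dma_mmap() and leaves exactly one behavioral difference between the two entry points: only arm_dma_mmap() rewrites vm_page_prot, because coherent memory can keep its normal attributes. A standalone model of that split (stub types only, not the kernel API):

#include <stdio.h>

struct vma { unsigned long page_prot; };

/* the shared helper: common remapping logic lives here */
static int shared_mmap(struct vma *vma)
{
	printf("map with prot %#lx\n", vma->page_prot);
	return 0;
}

static int coherent_mmap(struct vma *vma)
{
	return shared_mmap(vma);	/* prot untouched: memory is coherent */
}

static int streaming_mmap(struct vma *vma)
{
	vma->page_prot |= 1;		/* stand-in for __get_dma_pgprot() */
	return shared_mmap(vma);
}

int main(void)
{
	struct vma v = { 0x10 };

	coherent_mmap(&v);
	streaming_mmap(&v);
	return 0;
}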
@@ -1878,7 +1896,7 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_attach_device function.
  */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
 {
 	unsigned int bits = size >> PAGE_SHIFT;
 	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1886,6 +1904,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 	int extensions = 1;
 	int err = -ENOMEM;
 
+	/* currently only 32-bit DMA address space is supported */
+	if (size > DMA_BIT_MASK(32) + 1)
+		return ERR_PTR(-ERANGE);
+
 	if (!bitmap_size)
 		return ERR_PTR(-EINVAL);
 
@@ -2057,13 +2079,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (!iommu)
 		return false;
 
-	/*
-	 * currently arm_iommu_create_mapping() takes a max of size_t
-	 * for size param. So check this limit for now.
-	 */
-	if (size > SIZE_MAX)
-		return false;
-
 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
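With size widened to u64, the limit check moves inside arm_iommu_create_mapping() itself and the old SIZE_MAX test in the caller becomes redundant. Note the off-by-one-friendly bound: DMA_BIT_MASK(32) is 0xffffffff, so a full 4GiB space is accepted and anything larger is rejected. A standalone check of the arithmetic (the macro matches the kernel definition; error value hard-coded for portability):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int check_mapping_size(uint64_t size)
{
	if (size > DMA_BIT_MASK(32) + 1)
		return -34;	/* -ERANGE */
	return 0;
}

int main(void)
{
	printf("%d\n", check_mapping_size(1ULL << 32));		/* ok: 0 */
	printf("%d\n", check_mapping_size((1ULL << 32) + 1));	/* -34 */
	return 0;
}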
arch/arm/mm/mmu.c

@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
 		}
 
 		/*
-		 * Find the first non-section-aligned page, and point
+		 * Find the first non-pmd-aligned page, and point
 		 * memblock_limit at it. This relies on rounding the
-		 * limit down to be section-aligned, which happens at
-		 * the end of this function.
+		 * limit down to be pmd-aligned, which happens at the
+		 * end of this function.
 		 *
 		 * With this algorithm, the start or end of almost any
-		 * bank can be non-section-aligned. The only exception
-		 * is that the start of the bank 0 must be section-
+		 * bank can be non-pmd-aligned. The only exception is
+		 * that the start of the bank 0 must be section-
 		 * aligned, since otherwise memory would need to be
 		 * allocated when mapping the start of bank 0, which
 		 * occurs before any free memory is mapped.
 		 */
 		if (!memblock_limit) {
-			if (!IS_ALIGNED(block_start, SECTION_SIZE))
+			if (!IS_ALIGNED(block_start, PMD_SIZE))
 				memblock_limit = block_start;
-			else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+			else if (!IS_ALIGNED(block_end, PMD_SIZE))
 				memblock_limit = arm_lowmem_limit;
 		}
 
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
-	 * Round the memblock limit down to a section size.  This
+	 * Round the memblock limit down to a pmd size.  This
 	 * helps to ensure that we will allocate memory from the
-	 * last full section, which should be mapped.
+	 * last full pmd, which should be mapped.
 	 */
 	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+		memblock_limit = round_down(memblock_limit, PMD_SIZE);
 	if (!memblock_limit)
 		memblock_limit = arm_lowmem_limit;
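The switch from SECTION_SIZE to PMD_SIZE matters on classic (non-LPAE) ARM, where Linux folds two 1MiB hardware sections into one 2MiB pmd: a limit that is merely section-aligned can still end inside a half-mapped pmd. A standalone sketch with those constants (the example end address is made up):

#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)	/* 1 MiB hardware section */
#define PMD_SIZE	(2UL << 20)	/* 2 MiB: two sections per pmd */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long end = 0x2ff00000;	/* RAM ending at 767 MiB */
	unsigned long limit = end & ~(PMD_SIZE - 1);

	/* section-aligned yet not pmd-aligned, the problematic case */
	printf("section aligned: %d, pmd aligned: %d\n",
	       IS_ALIGNED(end, SECTION_SIZE), IS_ALIGNED(end, PMD_SIZE));
	printf("rounded-down limit: %#lx\n", limit);
	return 0;
}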
@@ -1387,123 +1387,98 @@ static void __init map_lowmem(void)
 	}
 }
 
-#ifdef CONFIG_ARM_LPAE
+#ifdef CONFIG_ARM_PV_FIXUP
+extern unsigned long __atags_pointer;
+typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
+pgtables_remap lpae_pgtables_remap_asm;
+
 /*
  * early_paging_init() recreates boot time page table setup, allowing machines
  * to switch over to a high (>4G) address space on LPAE systems
  */
-void __init early_paging_init(const struct machine_desc *mdesc,
-			      struct proc_info_list *procinfo)
+void __init early_paging_init(const struct machine_desc *mdesc)
 {
-	pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
-	unsigned long map_start, map_end;
-	pgd_t *pgd0, *pgdk;
-	pud_t *pud0, *pudk, *pud_start;
-	pmd_t *pmd0, *pmdk;
-	phys_addr_t phys;
-	int i;
+	pgtables_remap *lpae_pgtables_remap;
+	unsigned long pa_pgd;
+	unsigned int cr, ttbcr;
+	long long offset;
+	void *boot_data;
 
-	if (!(mdesc->init_meminfo))
+	if (!mdesc->pv_fixup)
 		return;
 
-	/* remap kernel code and data */
-	map_start = init_mm.start_code & PMD_MASK;
-	map_end = ALIGN(init_mm.brk, PMD_SIZE);
+	offset = mdesc->pv_fixup();
+	if (offset == 0)
+		return;
 
-	/* get a handle on things... */
-	pgd0 = pgd_offset_k(0);
-	pud_start = pud0 = pud_offset(pgd0, 0);
-	pmd0 = pmd_offset(pud0, 0);
+	/*
+	 * Get the address of the remap function in the 1:1 identity
+	 * mapping setup by the early page table assembly code.  We
+	 * must get this prior to the pv update.  The following barrier
+	 * ensures that this is complete before we fixup any P:V offsets.
+	 */
+	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
+	pa_pgd = __pa(swapper_pg_dir);
+	boot_data = __va(__atags_pointer);
+	barrier();
 
-	pgdk = pgd_offset_k(map_start);
-	pudk = pud_offset(pgdk, map_start);
-	pmdk = pmd_offset(pudk, map_start);
+	pr_info("Switching physical address space to 0x%08llx\n",
+		(u64)PHYS_OFFSET + offset);
 
-	mdesc->init_meminfo();
+	/* Re-set the phys pfn offset, and the pv offset */
+	__pv_offset += offset;
+	__pv_phys_pfn_offset += PFN_DOWN(offset);
 
 	/* Run the patch stub to update the constants */
 	fixup_pv_table(&__pv_table_begin,
 		(&__pv_table_end - &__pv_table_begin) << 2);
 
 	/*
 	 * Cache cleaning operations for self-modifying code
 	 * We should clean the entries by MVA but running a
 	 * for loop over every pv_table entry pointer would
 	 * just complicate the code.
 	 */
 	flush_cache_louis();
 	dsb(ishst);
 	isb();
 
-	/*
-	 * FIXME: This code is not architecturally compliant: we modify
-	 * the mappings in-place, indeed while they are in use by this
-	 * very same code.  This may lead to unpredictable behaviour of
-	 * the CPU.
-	 *
-	 * Even modifying the mappings in a separate page table does
-	 * not resolve this.
-	 *
-	 * The architecture strongly recommends that when a mapping is
-	 * changed, that it is changed by first going via an invalid
-	 * mapping and back to the new mapping.  This is to ensure that
-	 * no TLB conflicts (caused by the TLB having more than one TLB
-	 * entry match a translation) can occur.  However, doing that
-	 * here will result in unmapping the code we are running.
-	 */
-	pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
-	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
-	/*
-	 * Remap level 1 table.  This changes the physical addresses
-	 * used to refer to the level 2 page tables to the high
-	 * physical address alias, leaving everything else the same.
-	 */
-	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
-		set_pud(pud0,
-			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
-		pmd0 += PTRS_PER_PMD;
-	}
-
-	/*
-	 * Remap the level 2 table, pointing the mappings at the high
-	 * physical address alias of these pages.
-	 */
-	phys = __pa(map_start);
-	do {
-		*pmdk++ = __pmd(phys | pmdprot);
-		phys += PMD_SIZE;
-	} while (phys < map_end);
-
 	/*
-	 * Ensure that the above updates are flushed out of the cache.
-	 * This is not strictly correct; on a system where the caches
-	 * are coherent with each other, but the MMU page table walks
-	 * may not be coherent, flush_cache_all() may be a no-op, and
-	 * this will fail.
+	 * We changing not only the virtual to physical mapping, but also
+	 * the physical addresses used to access memory.  We need to flush
+	 * all levels of cache in the system with caching disabled to
+	 * ensure that all data is written back, and nothing is prefetched
+	 * into the caches.  We also need to prevent the TLB walkers
+	 * allocating into the caches too.  Note that this is ARMv7 LPAE
+	 * specific.
 	 */
+	cr = get_cr();
+	set_cr(cr & ~(CR_I | CR_C));
+	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+	asm volatile("mcr p15, 0, %0, c2, c0, 2"
+		: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
 	flush_cache_all();
 
 	/*
-	 * Re-write the TTBR values to point them at the high physical
-	 * alias of the page tables.  We expect __va() will work on
-	 * cpu_get_pgd(), which returns the value of TTBR0.
+	 * Fixup the page tables - this must be in the idmap region as
+	 * we need to disable the MMU to do this safely, and hence it
+	 * needs to be assembly.  It's fairly simple, as we're using the
+	 * temporary tables setup by the initial assembly code.
 	 */
-	cpu_switch_mm(pgd0, &init_mm);
-	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+	lpae_pgtables_remap(offset, pa_pgd, boot_data);
 
-	/* Finally flush any stale TLB values. */
-	local_flush_bp_all();
-	local_flush_tlb_all();
+	/* Re-enable the caches and cacheable TLB walks */
+	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+	set_cr(cr);
 }
 
 #else
 
-void __init early_paging_init(const struct machine_desc *mdesc,
-			      struct proc_info_list *procinfo)
+void __init early_paging_init(const struct machine_desc *mdesc)
 {
-	if (mdesc->init_meminfo)
-		mdesc->init_meminfo();
+	long long offset;
+
+	if (!mdesc->pv_fixup)
+		return;
+
+	offset = mdesc->pv_fixup();
+	if (offset == 0)
+		return;
+
+	pr_crit("Physical address space modification is only to support Keystone2.\n");
+	pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
+	pr_crit("feature. Your kernel may crash now, have a good day.\n");
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
 
 #endif
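Taken together, the two branches above reduce early_paging_init() to one decision: ask the machine for a physical-to-virtual delta and, only if it is non-zero, adjust the pv offsets and hand off to the identity-mapped assembly helper. A standalone model of that control flow (the struct and values are simplified stand-ins; the delta shown corresponds to the Keystone 2 move of RAM from 0x80000000 to 0x800000000 that this feature exists for):

#include <stdio.h>

struct machine_desc {
	long long (*pv_fixup)(void);	/* returns P:V delta, 0 = none */
};

static long long keystone_pv_fixup(void)
{
	return 0x800000000LL - 0x80000000LL;
}

static long long pv_offset;

static void early_paging_init(const struct machine_desc *mdesc)
{
	long long offset;

	if (!mdesc->pv_fixup)
		return;
	offset = mdesc->pv_fixup();
	if (offset == 0)
		return;

	pv_offset += offset;	/* then patch stubs, flush, remap tables */
	printf("switching physical address space by %#llx\n", offset);
}

int main(void)
{
	struct machine_desc md = { keystone_pv_fixup };

	early_paging_init(&md);
	return 0;
}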
arch/arm/mm/nommu.c

@@ -303,15 +303,6 @@ void __init sanity_check_meminfo(void)
 	memblock_set_current_limit(end);
 }
 
-/*
- * early_paging_init() recreates boot time page table setup, allowing machines
- * to switch over to a high (>4G) address space on LPAE systems
- */
-void __init early_paging_init(const struct machine_desc *mdesc,
-			      struct proc_info_list *procinfo)
-{
-}
-
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
arch/arm/mm/proc-arm1020.S

@@ -22,8 +22,6 @@
  *
  * These are the low level assembler for performing cache and TLB
  * functions on the arm1020.
- *
- * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
  */
 #include <linux/linkage.h>
 #include <linux/init.h>

arch/arm/mm/proc-arm1020e.S

@@ -22,8 +22,6 @@
  *
  * These are the low level assembler for performing cache and TLB
  * functions on the arm1020e.
- *
- * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
  */
 #include <linux/linkage.h>
 #include <linux/init.h>

arch/arm/mm/proc-arm925.S

@@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext)
 	.type	__arm925_setup, #function
 __arm925_setup:
 	mov	r0, #0
-#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
-	orr	r0,r0,#1 << 7
-#endif
 
 	/* Transparent on, D-cache clean & flush mode. See  NOTE2 above */
 	orr	r0,r0,#1 << 1			@ transparent mode on

arch/arm/mm/proc-feroceon.S

@@ -602,7 +602,6 @@ __\name\()_proc_info:
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
 	initfn	__feroceon_setup, __\name\()_proc_info
-	.long __feroceon_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
arch/arm/mm/proc-v7-2level.S

@@ -36,14 +36,16 @@
  *
  *	It is assumed that:
  *	- we are not using split page tables
+ *
+ *	Note that we always need to flush BTAC/BTB if IBE is set
+ *	even on Cortex-A8 revisions not affected by 430973.
+ *	If IBE is not set, the flush BTAC/BTB won't do anything.
  */
 ENTRY(cpu_ca8_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-#ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
 #endif
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mmid	r1, r1				@ get mm->context.id
@@ -148,10 +150,10 @@ ENDPROC(cpu_v7_set_pte_ext)
  *	Macro for setting up the TTBRx and TTBCR registers.
  *	- \ttb0 and \ttb1 updated with the corresponding flags.
  */
-	.macro	v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+	.macro	v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
 	mcr	p15, 0, \zero, c2, c0, 2	@ TTB control register
-	ALT_SMP(orr	\ttbr0, \ttbr0, #TTB_FLAGS_SMP)
-	ALT_UP(orr	\ttbr0, \ttbr0, #TTB_FLAGS_UP)
+	ALT_SMP(orr	\ttbr0l, \ttbr0l, #TTB_FLAGS_SMP)
+	ALT_UP(orr	\ttbr0l, \ttbr0l, #TTB_FLAGS_UP)
 	ALT_SMP(orr	\ttbr1, \ttbr1, #TTB_FLAGS_SMP)
 	ALT_UP(orr	\ttbr1, \ttbr1, #TTB_FLAGS_UP)
 	mcr	p15, 0, \ttbr1, c2, c0, 1	@ load TTB1

arch/arm/mm/proc-v7-3level.S

@@ -126,11 +126,10 @@ ENDPROC(cpu_v7_set_pte_ext)
  *	Macro for setting up the TTBRx and TTBCR registers.
  *	- \ttbr1 updated.
  */
-	.macro	v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+	.macro	v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
 	ldr	\tmp, =swapper_pg_dir		@ swapper_pg_dir virtual address
-	mov	\tmp, \tmp, lsr #ARCH_PGD_SHIFT
-	cmp	\ttbr1, \tmp			@ PHYS_OFFSET > PAGE_OFFSET?
-	mrc	p15, 0, \tmp, c2, c0, 2		@ TTB control register
+	cmp	\ttbr1, \tmp, lsr #12		@ PHYS_OFFSET > PAGE_OFFSET?
+	mrc	p15, 0, \tmp, c2, c0, 2		@ TTB control register
 	orr	\tmp, \tmp, #TTB_EAE
 	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP)
 	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP)
@@ -143,13 +142,10 @@ ENDPROC(cpu_v7_set_pte_ext)
 	 */
 	orrls	\tmp, \tmp, #TTBR1_SIZE				@ TTBCR.T1SZ
 	mcr	p15, 0, \tmp, c2, c0, 2				@ TTBCR
-	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
-	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
+	mov	\tmp, \ttbr1, lsr #20
+	mov	\ttbr1, \ttbr1, lsl #12
 	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
 	mcrr	p15, 1, \ttbr1, \tmp, c2			@ load TTBR1
-	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
-	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits
-	mcrr	p15, 0, \ttbr0, \tmp, c2			@ load TTBR0
 	.endm
 
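The new 3-level macro assumes TTBR1 arrives as a single 32-bit register holding the page table physical address shifted right by 12, which the two mov instructions then split into the low and high words that mcrr expects. A standalone model of that packing (the example address is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x8ff340000ULL;		/* example table above 4GiB */
	uint32_t packed = (uint32_t)(phys >> 12);

	uint32_t low  = packed << 12;		/* mov ttbr1, ttbr1, lsl #12 */
	uint32_t high = packed >> 20;		/* mov tmp, ttbr1, lsr #20 */

	printf("TTBR1 = %#x:%#08x (phys %#llx)\n", high, low,
	       (unsigned long long)(((uint64_t)high << 32) | low));
	return 0;
}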
arch/arm/mm/proc-v7.S

@@ -252,6 +252,12 @@ ENDPROC(cpu_pj4b_do_resume)
  *	Initialise TLB, Caches, and MMU state ready to switch the MMU
  *	on.  Return in r0 the new CP15 C1 control register setting.
  *
+ *	r1, r2, r4, r5, r9, r13 must be preserved - r13 is not a stack
+ *	r4: TTBR0 (low word)
+ *	r5: TTBR0 (high word if LPAE)
+ *	r8: TTBR1
+ *	r9: Main ID register
+ *
  *	This should be able to cover all ARMv7 cores.
  *
  *	It is assumed that:
@@ -279,6 +285,78 @@ __v7_ca17mp_setup:
 #endif
 	b	__v7_setup
 
+/*
+ * Errata:
+ *  r0, r10 available for use
+ *  r1, r2, r4, r5, r9, r13: must be preserved
+ *  r3: contains MIDR rX number in bits 23-20
+ *  r6: contains MIDR rXpY as 8-bit XY number
+ *  r9: MIDR
+ */
+__ca8_errata:
+#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
+	teq	r3, #0x00100000			@ only present in r1p*
+	mrceq	p15, 0, r0, c1, c0, 1		@ read aux control register
+	orreq	r0, r0, #(1 << 6)		@ set IBE to 1
+	mcreq	p15, 0, r0, c1, c0, 1		@ write aux control register
+#endif
+#ifdef CONFIG_ARM_ERRATA_458693
+	teq	r6, #0x20			@ only present in r2p0
+	mrceq	p15, 0, r0, c1, c0, 1		@ read aux control register
+	orreq	r0, r0, #(1 << 5)		@ set L1NEON to 1
+	orreq	r0, r0, #(1 << 9)		@ set PLDNOP to 1
+	mcreq	p15, 0, r0, c1, c0, 1		@ write aux control register
+#endif
+#ifdef CONFIG_ARM_ERRATA_460075
+	teq	r6, #0x20			@ only present in r2p0
+	mrceq	p15, 1, r0, c9, c0, 2		@ read L2 cache aux ctrl register
+	tsteq	r0, #1 << 22
+	orreq	r0, r0, #(1 << 22)		@ set the Write Allocate disable bit
+	mcreq	p15, 1, r0, c9, c0, 2		@ write the L2 cache aux ctrl register
+#endif
+	b	__errata_finish
+
+__ca9_errata:
+#ifdef CONFIG_ARM_ERRATA_742230
+	cmp	r6, #0x22			@ only present up to r2p2
+	mrcle	p15, 0, r0, c15, c0, 1		@ read diagnostic register
+	orrle	r0, r0, #1 << 4			@ set bit #4
+	mcrle	p15, 0, r0, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+	teq	r6, #0x20			@ present in r2p0
+	teqne	r6, #0x21			@ present in r2p1
+	teqne	r6, #0x22			@ present in r2p2
+	mrceq	p15, 0, r0, c15, c0, 1		@ read diagnostic register
+	orreq	r0, r0, #1 << 12		@ set bit #12
+	orreq	r0, r0, #1 << 22		@ set bit #22
+	mcreq	p15, 0, r0, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_743622
+	teq	r3, #0x00200000			@ only present in r2p*
+	mrceq	p15, 0, r0, c15, c0, 1		@ read diagnostic register
+	orreq	r0, r0, #1 << 6			@ set bit #6
+	mcreq	p15, 0, r0, c15, c0, 1		@ write diagnostic register
+#endif
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+	ALT_SMP(cmp r6, #0x30)			@ present prior to r3p0
+	ALT_UP_B(1f)
+	mrclt	p15, 0, r0, c15, c0, 1		@ read diagnostic register
+	orrlt	r0, r0, #1 << 11		@ set bit #11
+	mcrlt	p15, 0, r0, c15, c0, 1		@ write diagnostic register
+1:
+#endif
+	b	__errata_finish
+
+__ca15_errata:
+#ifdef CONFIG_ARM_ERRATA_773022
+	cmp	r6, #0x4			@ only present up to r0p4
+	mrcle	p15, 0, r0, c1, c0, 1		@ read aux control register
+	orrle	r0, r0, #1 << 1			@ disable loop buffer
+	mcrle	p15, 0, r0, c1, c0, 1		@ write aux control register
+#endif
+	b	__errata_finish
+
 __v7_pj4b_setup:
 #ifdef CONFIG_CPU_PJ4B
 
@@ -339,96 +417,38 @@ __v7_setup:
 	bl	v7_flush_dcache_louis
 	ldmia	r12, {r0-r5, r7, r9, r11, lr}
 
-	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
-	and	r10, r0, #0xff000000		@ ARM?
-	teq	r10, #0x41000000
-	bne	3f
-	and	r5, r0, #0x00f00000		@ variant
-	and	r6, r0, #0x0000000f		@ revision
-	orr	r6, r6, r5, lsr #20-4		@ combine variant and revision
-	ubfx	r0, r0, #4, #12			@ primary part number
+	and	r0, r9, #0xff000000		@ ARM?
+	teq	r0, #0x41000000
+	bne	__errata_finish
+	and	r3, r9, #0x00f00000		@ variant
+	and	r6, r9, #0x0000000f		@ revision
+	orr	r6, r6, r3, lsr #20-4		@ combine variant and revision
+	ubfx	r0, r9, #4, #12			@ primary part number
 
 	/* Cortex-A8 Errata */
 	ldr	r10, =0x00000c08		@ Cortex-A8 primary part number
 	teq	r0, r10
-	bne	2f
-#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
-
-	teq	r5, #0x00100000			@ only present in r1p*
-	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
-	orreq	r10, r10, #(1 << 6)		@ set IBE to 1
-	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
-#endif
-#ifdef CONFIG_ARM_ERRATA_458693
-	teq	r6, #0x20			@ only present in r2p0
-	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
-	orreq	r10, r10, #(1 << 5)		@ set L1NEON to 1
-	orreq	r10, r10, #(1 << 9)		@ set PLDNOP to 1
-	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
-#endif
-#ifdef CONFIG_ARM_ERRATA_460075
-	teq	r6, #0x20			@ only present in r2p0
-	mrceq	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
-	tsteq	r10, #1 << 22
-	orreq	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
-	mcreq	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
-#endif
-	b	3f
+	beq	__ca8_errata
 
 	/* Cortex-A9 Errata */
-2:	ldr	r10, =0x00000c09		@ Cortex-A9 primary part number
+	ldr	r10, =0x00000c09		@ Cortex-A9 primary part number
 	teq	r0, r10
-	bne	3f
-#ifdef CONFIG_ARM_ERRATA_742230
-	cmp	r6, #0x22			@ only present up to r2p2
-	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
-	orrle	r10, r10, #1 << 4		@ set bit #4
-	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
-#endif
-#ifdef CONFIG_ARM_ERRATA_742231
-	teq	r6, #0x20			@ present in r2p0
-	teqne	r6, #0x21			@ present in r2p1
-	teqne	r6, #0x22			@ present in r2p2
-	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
-	orreq	r10, r10, #1 << 12		@ set bit #12
-	orreq	r10, r10, #1 << 22		@ set bit #22
-	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
-#endif
-#ifdef CONFIG_ARM_ERRATA_743622
-	teq	r5, #0x00200000			@ only present in r2p*
-	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
-	orreq	r10, r10, #1 << 6		@ set bit #6
-	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
-#endif
-#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
-	ALT_SMP(cmp r6, #0x30)			@ present prior to r3p0
-	ALT_UP_B(1f)
-	mrclt	p15, 0, r10, c15, c0, 1		@ read diagnostic register
-	orrlt	r10, r10, #1 << 11		@ set bit #11
-	mcrlt	p15, 0, r10, c15, c0, 1		@ write diagnostic register
-1:
-#endif
+	beq	__ca9_errata
 
 	/* Cortex-A15 Errata */
-3:	ldr	r10, =0x00000c0f		@ Cortex-A15 primary part number
+	ldr	r10, =0x00000c0f		@ Cortex-A15 primary part number
 	teq	r0, r10
-	bne	4f
-#ifdef CONFIG_ARM_ERRATA_773022
-	cmp	r6, #0x4			@ only present up to r0p4
-	mrcle	p15, 0, r10, c1, c0, 1		@ read aux control register
-	orrle	r10, r10, #1 << 1		@ disable loop buffer
-	mcrle	p15, 0, r10, c1, c0, 1		@ write aux control register
-#endif
+	beq	__ca15_errata
 
-4:	mov	r10, #0
+__errata_finish:
+	mov	r10, #0
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
-	v7_ttb_setup r10, r4, r8, r5		@ TTBCR, TTBRx setup
-	ldr	r5, =PRRR			@ PRRR
+	v7_ttb_setup r10, r4, r5, r8, r3	@ TTBCR, TTBRx setup
+	ldr	r3, =PRRR			@ PRRR
 	ldr	r6, =NMRR			@ NMRR
-	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
+	mcr	p15, 0, r3, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
 	dsb					@ Complete invalidations
@@ -437,22 +457,22 @@ __v7_setup:
 	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
 	teq	r0, #(1 << 12)			@ check if ThumbEE is present
 	bne	1f
-	mov	r5, #0
-	mcr	p14, 6, r5, c1, c0, 0		@ Initialize TEEHBR to 0
+	mov	r3, #0
+	mcr	p14, 6, r3, c1, c0, 0		@ Initialize TEEHBR to 0
 	mrc	p14, 6, r0, c0, c0, 0		@ load TEECR
 	orr	r0, r0, #1			@ set the 1st bit in order to
 	mcr	p14, 6, r0, c0, c0, 0		@ stop userspace TEEHBR access
 1:
 #endif
-	adr	r5, v7_crval
-	ldmia	r5, {r5, r6}
+	adr	r3, v7_crval
+	ldmia	r3, {r3, r6}
 ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
 #ifdef CONFIG_SWP_EMULATE
-	orr	r5, r5, #(1 << 10)		@ set SW bit in "clear"
+	orr	r3, r3, #(1 << 10)		@ set SW bit in "clear"
 	bic	r6, r6, #(1 << 10)		@ clear it in "mmuset"
 #endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
-	bic	r0, r0, r5			@ clear bits them
+	bic	r0, r0, r3			@ clear bits them
 	orr	r0, r0, r6			@ set them
 THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
 	ret	lr				@ return to head.S:__ret
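The rework keys everything off the MIDR value that __v7_setup now receives in r9: implementer in bits 31-24, variant (rX) in 23-20, part number in 15-4, and revision (pY) in 3-0, with variant and revision combined into one 8-bit rXpY value for the teq r6-style checks. A standalone decoder for the same fields (the example MIDR value is chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t midr = 0x413fc090;	/* example: Cortex-A9 r3p0 */

	uint32_t implementer = midr >> 24;		/* 0x41 = 'A' (ARM) */
	uint32_t variant     = (midr >> 20) & 0xf;	/* rX */
	uint32_t part        = (midr >> 4) & 0xfff;	/* 0xc09 = Cortex-A9 */
	uint32_t revision    = midr & 0xf;		/* pY */
	uint32_t rxpy        = (variant << 4) | revision; /* 8-bit XY */

	printf("impl %#x part %#x r%up%u (rXpY = %#x)\n",
	       implementer, part, variant, revision, rxpy);
	return 0;
}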
arch/arm/mm/pv-fixup-asm.S (new file, 88 lines)
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This assembly is required to safely remap the physical address space
+ * for Keystone 2
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/cp15.h>
+#include <asm/memory.h>
+#include <asm/pgtable.h>
+
+	.section ".idmap.text", "ax"
+
+#define L1_ORDER 3
+#define L2_ORDER 3
+
+ENTRY(lpae_pgtables_remap_asm)
+	stmfd	sp!, {r4-r8, lr}
+
+	mrc	p15, 0, r8, c1, c0, 0		@ read control reg
+	bic	ip, r8, #CR_M			@ disable caches and MMU
+	mcr	p15, 0, ip, c1, c0, 0
+	dsb
+	isb
+
+	/* Update level 2 entries covering the kernel */
+	ldr	r6, =(_end - 1)
+	add	r7, r2, #0x1000
+	add	r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
+	add	r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+1:	ldrd	r4, [r7]
+	adds	r4, r4, r0
+	adc	r5, r5, r1
+	strd	r4, [r7], #1 << L2_ORDER
+	cmp	r7, r6
+	bls	1b
+
+	/* Update level 2 entries for the boot data */
+	add	r7, r2, #0x1000
+	add	r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
+	bic	r7, r7, #(1 << L2_ORDER) - 1
+	ldrd	r4, [r7]
+	adds	r4, r4, r0
+	adc	r5, r5, r1
+	strd	r4, [r7], #1 << L2_ORDER
+	ldrd	r4, [r7]
+	adds	r4, r4, r0
+	adc	r5, r5, r1
+	strd	r4, [r7]
+
+	/* Update level 1 entries */
+	mov	r6, #4
+	mov	r7, r2
+2:	ldrd	r4, [r7]
+	adds	r4, r4, r0
+	adc	r5, r5, r1
+	strd	r4, [r7], #1 << L1_ORDER
+	subs	r6, r6, #1
+	bne	2b
+
+	mrrc	p15, 0, r4, r5, c2		@ read TTBR0
+	adds	r4, r4, r0			@ update physical address
+	adc	r5, r5, r1
+	mcrr	p15, 0, r4, r5, c2		@ write back TTBR0
+	mrrc	p15, 1, r4, r5, c2		@ read TTBR1
+	adds	r4, r4, r0			@ update physical address
+	adc	r5, r5, r1
+	mcrr	p15, 1, r4, r5, c2		@ write back TTBR1
+
+	dsb
+
+	mov	ip, #0
+	mcr	p15, 0, ip, c7, c5, 0		@ I+BTB cache invalidate
+	mcr	p15, 0, ip, c8, c7, 0		@ local_flush_tlb_all()
+	dsb
+	isb
+
+	mcr	p15, 0, r8, c1, c0, 0		@ re-enable MMU
+	dsb
+	isb
+
+	ldmfd	sp!, {r4-r8, pc}
+ENDPROC(lpae_pgtables_remap_asm)
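Each loop above performs the same primitive on a 64-bit LPAE descriptor: ldrd loads it into a register pair, adds/adc add the 64-bit physical delta with carry, and strd writes it back; the low attribute bits survive because the delta is aligned well above them. A standalone C model of that update (table contents and delta are made-up example values):

#include <stdint.h>
#include <stdio.h>

static void remap_entries(uint64_t *table, int n, uint64_t offset)
{
	/* mirrors one ldrd/adds/adc/strd pass over n descriptors */
	for (int i = 0; i < n; i++)
		table[i] += offset;
}

int main(void)
{
	/* two fake level-1 descriptors: output address plus attr bits */
	uint64_t pgd[2] = { 0x80000701ULL, 0x80200701ULL };

	remap_entries(pgd, 2, 0x780000000ULL);	/* Keystone-style delta */
	printf("%#llx %#llx\n",
	       (unsigned long long)pgd[0], (unsigned long long)pgd[1]);
	return 0;
}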