Merge branches 'debug', 'fixes', 'l2c' (early part), 'misc' and 'sa1100' into for-next

Author: Russell King
Date:   2015-02-10 10:26:27 +00:00
51 changed files with 957 additions and 1258 deletions

View file

@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
config ARM_KERNMEM_PERMS
bool "Restrict kernel memory permissions"
depends on MMU
help
If this is set, kernel memory other than kernel text (and rodata)
will be made non-executable. The tradeoff is that each region is

View file

@@ -41,12 +41,14 @@ struct l2c_init_data {
void (*enable)(void __iomem *, u32, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
void (*configure)(void __iomem *);
struct outer_cache_fns outer_cache;
};
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
@@ -106,6 +108,19 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
}
}
static void l2c_configure(void __iomem *base)
{
if (outer_cache.configure) {
outer_cache.configure(&l2x0_saved_regs);
return;
}
if (l2x0_data->configure)
l2x0_data->configure(base);
l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
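
The configure hook added above gives platforms a commit-like interface: register values are staged in l2x0_saved_regs and written back to the hardware in one place. A minimal sketch of how a platform whose L2C registers are secure-only might override it; the firmware-write helper and function names are hypothetical, and the callback signature (taking a const struct l2x0_regs *) is assumed from the outer_cache.configure(&l2x0_saved_regs) call in the hunk above:

#include <asm/outercache.h>
#include <asm/hardware/cache-l2x0.h>

/* Hypothetical secure-firmware write; a real platform would issue an SMC. */
static void example_fw_l2c_write(unsigned long val, unsigned reg)
{
	/* smc(FW_L2C_WRITE, val, reg) or similar, platform specific */
}

static void example_l2c310_configure(const struct l2x0_regs *regs)
{
	example_fw_l2c_write(regs->tag_latency, L310_TAG_LATENCY_CTRL);
	example_fw_l2c_write(regs->data_latency, L310_DATA_LATENCY_CTRL);
	example_fw_l2c_write(regs->aux_ctrl, L2X0_AUX_CTRL);
}

/* Installed from the machine's early init, before l2x0_of_init() runs: */
static void __init example_init_early(void)
{
	outer_cache.configure = example_l2c310_configure;
}

With such an override installed, l2c_configure() returns early and never touches the registers directly, as the hunk above shows.
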
/*
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
@@ -114,7 +129,12 @@ static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
unsigned long flags;
l2c_write_sec(aux, base, L2X0_AUX_CTRL);
/* Do not touch the controller if already enabled. */
if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
return;
l2x0_saved_regs.aux_ctrl = aux;
l2c_configure(base);
l2c_unlock(base, num_lock);
@@ -136,78 +156,16 @@ static void l2c_disable(void)
dsb(st);
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait l2c_wait_mask
#endif
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
writel_relaxed(0, base + sync_reg_offset);
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif
static void l2x0_cache_sync(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
{
debug_writel(0x03);
__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
cache_sync();
debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
unsigned long flags;
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
l2c_write_sec(0, l2x0_base, L2X0_CTRL);
dsb(st);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c_save(void __iomem *base)
{
l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
static void l2c_resume(void)
{
l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
}
/*
* L2C-210 specific code.
*
@@ -288,14 +246,6 @@ static void l2c210_sync(void)
__l2c210_cache_sync(l2x0_base);
}
static void l2c210_resume(void)
{
void __iomem *base = l2x0_base;
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}
static const struct l2c_init_data l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
@@ -309,7 +259,7 @@ static const struct l2c_init_data l2c210_data __initconst = {
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
.resume = l2c210_resume,
.resume = l2c_resume,
},
};
@@ -466,7 +416,7 @@ static const struct l2c_init_data l2c220_data = {
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
.resume = l2c210_resume,
.resume = l2c_resume,
},
};
@@ -615,39 +565,29 @@ static void __init l2c310_save(void __iomem *base)
L310_POWER_CTRL);
}
static void l2c310_resume(void)
static void l2c310_configure(void __iomem *base)
{
void __iomem *base = l2x0_base;
unsigned revision;
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
unsigned revision;
/* restore pl310 setup */
l2c_write_sec(l2x0_saved_regs.tag_latency, base,
L310_TAG_LATENCY_CTRL);
l2c_write_sec(l2x0_saved_regs.data_latency, base,
L310_DATA_LATENCY_CTRL);
l2c_write_sec(l2x0_saved_regs.filter_end, base,
L310_ADDR_FILTER_END);
l2c_write_sec(l2x0_saved_regs.filter_start, base,
L310_ADDR_FILTER_START);
/* restore pl310 setup */
writel_relaxed(l2x0_saved_regs.tag_latency,
base + L310_TAG_LATENCY_CTRL);
writel_relaxed(l2x0_saved_regs.data_latency,
base + L310_DATA_LATENCY_CTRL);
writel_relaxed(l2x0_saved_regs.filter_end,
base + L310_ADDR_FILTER_END);
writel_relaxed(l2x0_saved_regs.filter_start,
base + L310_ADDR_FILTER_START);
revision = readl_relaxed(base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
revision = readl_relaxed(base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
if (revision >= L310_CACHE_ID_RTL_R2P0)
l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
L310_PREFETCH_CTRL);
if (revision >= L310_CACHE_ID_RTL_R3P0)
l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
L310_POWER_CTRL);
l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
/* Re-enable full-line-of-zeros for Cortex-A9 */
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
if (revision >= L310_CACHE_ID_RTL_R2P0)
l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
L310_PREFETCH_CTRL);
if (revision >= L310_CACHE_ID_RTL_R3P0)
l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
L310_POWER_CTRL);
}
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
@@ -699,6 +639,23 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
/* r3p0 or later has power control register */
if (rev >= L310_CACHE_ID_RTL_R3P0)
l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
L310_STNDBY_MODE_EN;
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
aux |= L310_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, aux, num_lock);
/* Read back resulting AUX_CTRL value as it could have been altered. */
aux = readl_relaxed(base + L2X0_AUX_CTRL);
if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
@@ -712,23 +669,12 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
if (rev >= L310_CACHE_ID_RTL_R3P0) {
u32 power_ctrl;
l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
base, L310_POWER_CTRL);
power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
aux |= L310_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, aux, num_lock);
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
cpu_notifier(l2c310_cpu_enable_flz, 0);
@@ -760,11 +706,11 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
u32 val = l2x0_saved_regs.prefetch_ctrl;
/* I don't think bit23 is required here... but iMX6 does so */
if (val & (BIT(30) | BIT(23))) {
val &= ~(BIT(30) | BIT(23));
l2c_write_sec(val, base, L310_PREFETCH_CTRL);
l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
}
@@ -800,6 +746,15 @@ static void l2c310_disable(void)
l2c_disable();
}
static void l2c310_resume(void)
{
l2c_resume();
/* Re-enable full-line-of-zeros for Cortex-A9 */
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
@@ -807,6 +762,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -818,13 +774,21 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
},
};
static void __init __l2c_init(const struct l2c_init_data *data,
u32 aux_val, u32 aux_mask, u32 cache_id)
static int __init __l2c_init(const struct l2c_init_data *data,
u32 aux_val, u32 aux_mask, u32 cache_id)
{
struct outer_cache_fns fns;
unsigned way_size_bits, ways;
u32 aux, old_aux;
/*
* Save the pointer globally so that callbacks which do not receive
* context from callers can access the structure.
*/
l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
if (!l2x0_data)
return -ENOMEM;
/*
* Sanity check the aux values. aux_mask is the bits we preserve
* from reading the hardware register, and aux_val is the bits we
@@ -884,6 +848,7 @@ static void __init __l2c_init(const struct l2c_init_data *data,
fns = data->outer_cache;
fns.write_sec = outer_cache.write_sec;
fns.configure = outer_cache.configure;
if (data->fixup)
data->fixup(l2x0_base, cache_id, &fns);
@@ -910,6 +875,8 @@ static void __init __l2c_init(const struct l2c_init_data *data,
data->type, ways, l2x0_size >> 10);
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
@@ -936,6 +903,10 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
break;
}
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
__l2c_init(data, aux_val, aux_mask, cache_id);
}
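
Taken together with the save-before-init ordering above, the series converges on a single read-modify-commit flow. A sketch of the resulting call graph, with names as in the hunks and the ordering inferred from them:

/*
 * l2x0_init() / l2x0_of_init()
 *   -> data->save(l2x0_base)      read current hardware state into
 *                                 l2x0_saved_regs
 *   -> data->of_parse()           (DT path) adjust l2x0_saved_regs only;
 *                                 the hardware is untouched while enabled
 *   -> __l2c_init()
 *        -> kmemdup(data)         keep l2c_init_data around for callbacks
 *        -> data->enable()
 *             -> l2c_configure()  commit l2x0_saved_regs to hardware,
 *                                 or defer to outer_cache.configure
 *
 * resume:
 *   l2c_resume() -> l2c_enable() -> l2c_configure()    same commit path
 */
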
@@ -1102,7 +1073,7 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
.resume = l2c210_resume,
.resume = l2c_resume,
},
};
@@ -1120,7 +1091,7 @@ static const struct l2c_init_data of_l2c220_data __initconst = {
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
.resume = l2c210_resume,
.resume = l2c_resume,
},
};
@@ -1131,32 +1102,32 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
u32 prefetch;
u32 val;
int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
writel_relaxed(
l2x0_saved_regs.tag_latency =
L310_LATENCY_CTRL_RD(tag[0] - 1) |
L310_LATENCY_CTRL_WR(tag[1] - 1) |
L310_LATENCY_CTRL_SETUP(tag[2] - 1),
l2x0_base + L310_TAG_LATENCY_CTRL);
L310_LATENCY_CTRL_SETUP(tag[2] - 1);
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1] && data[2])
writel_relaxed(
l2x0_saved_regs.data_latency =
L310_LATENCY_CTRL_RD(data[0] - 1) |
L310_LATENCY_CTRL_WR(data[1] - 1) |
L310_LATENCY_CTRL_SETUP(data[2] - 1),
l2x0_base + L310_DATA_LATENCY_CTRL);
L310_LATENCY_CTRL_SETUP(data[2] - 1);
of_property_read_u32_array(np, "arm,filter-ranges",
filter, ARRAY_SIZE(filter));
if (filter[1]) {
writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
l2x0_base + L310_ADDR_FILTER_END);
writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
l2x0_base + L310_ADDR_FILTER_START);
l2x0_saved_regs.filter_end =
ALIGN(filter[0] + filter[1], SZ_1M);
l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
| L310_ADDR_FILTER_EN;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
@@ -1178,6 +1149,58 @@ static void __init l2c310_of_parse(const struct device_node *np,
assoc);
break;
}
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
}
ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
}
ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
if (ret == 0) {
if (!val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
}
ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
else
prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
}
ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
if (ret == 0) {
prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
}
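
Each property above follows the same tri-state idiom: of_property_read_u32() returns 0 when the property has a value, -EINVAL when it is absent (keep the default), and another error code when it is present but malformed. A minimal sketch of the idiom with a hypothetical property name:

#include <linux/of.h>

static void __init example_of_parse(const struct device_node *np, u32 *ctrl)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(np, "arm,example-feature", &val);
	if (ret == 0) {			/* present with a value: apply it */
		if (val)
			*ctrl |= BIT(0);
		else
			*ctrl &= ~BIT(0);
	} else if (ret != -EINVAL) {	/* present but malformed: warn */
		pr_err("arm,example-feature property value is missing\n");
	}
	/* ret == -EINVAL: property absent, keep the hardware default */
}
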
static const struct l2c_init_data of_l2c310_data __initconst = {
@@ -1188,6 +1211,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1216,6 +1240,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1231,7 +1256,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
static unsigned long calc_range_end(unsigned long start, unsigned long end)
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
/*
* Limit the number of cache lines processed at once,
@@ -1250,25 +1275,13 @@ static unsigned long calc_range_end(unsigned long start, unsigned long end)
return end;
}
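
The body elided from this hunk is unchanged by the rename; under the assumptions stated in its comment (a bounded burst size, and range operations that only TLB-translate the start address), it amounts to a clamp like the following sketch, where MAX_RANGE_SIZE stands for the driver's per-burst limit:

static unsigned long example_range_end(unsigned long start, unsigned long end)
{
	/* Bound the burst: range ops stall the CPU until they finish. */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/* Only 'start' is translated, so never cross a page boundary. */
	if (end > PAGE_ALIGN(start + 1))
		end = PAGE_ALIGN(start + 1);

	return end;
}
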
/*
* Make sure 'start' and 'end' reference the same page, as L2 is PIPT
* and range operations only do a TLB lookup on the start address.
*/
static void aurora_pa_range(unsigned long start, unsigned long end,
unsigned long offset)
unsigned long offset)
{
void __iomem *base = l2x0_base;
unsigned long range_end;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
writel_relaxed(end, l2x0_base + offset);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
cache_sync();
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
/*
* round start and end addresses up to cache line size
*/
@@ -1276,15 +1289,24 @@ static void aurora_inv_range(unsigned long start, unsigned long end)
end = ALIGN(end, CACHE_LINE_SIZE);
/*
* Invalidate all full cache lines between 'start' and 'end'.
* perform operation on all full cache lines between 'start' and 'end'
*/
while (start < end) {
unsigned long range_end = calc_range_end(start, end);
aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
AURORA_INVAL_RANGE_REG);
range_end = aurora_range_end(start, end);
raw_spin_lock_irqsave(&l2x0_lock, flags);
writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
}
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
@@ -1292,36 +1314,47 @@ static void aurora_clean_range(unsigned long start, unsigned long end)
* If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
if (!l2_wt_override) {
start &= ~(CACHE_LINE_SIZE - 1);
end = ALIGN(end, CACHE_LINE_SIZE);
while (start != end) {
unsigned long range_end = calc_range_end(start, end);
aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
AURORA_CLEAN_RANGE_REG);
start = range_end;
}
}
if (!l2_wt_override)
aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
start &= ~(CACHE_LINE_SIZE - 1);
end = ALIGN(end, CACHE_LINE_SIZE);
while (start != end) {
unsigned long range_end = calc_range_end(start, end);
/*
* If L2 is forced to WT, the L2 will always be clean and we
* just need to invalidate.
*/
if (l2_wt_override)
aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
AURORA_INVAL_RANGE_REG);
else
aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
AURORA_FLUSH_RANGE_REG);
start = range_end;
}
if (l2_wt_override)
aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
else
aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}
static void aurora_flush_all(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
}
static void aurora_cache_sync(void)
{
writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}
static void aurora_disable(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
writel_relaxed(0, base + AURORA_SYNC_REG);
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void aurora_save(void __iomem *base)
@@ -1330,16 +1363,6 @@ static void aurora_save(void __iomem *base)
l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
static void aurora_resume(void)
{
void __iomem *base = l2x0_base;
if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
}
}
/*
* For Aurora cache in no outer mode, enable via the CP15 coprocessor
* broadcasting of cache commands to L2.
@@ -1398,10 +1421,10 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
.flush_range = aurora_flush_range,
.flush_all = l2x0_flush_all,
.disable = l2x0_disable,
.sync = l2x0_cache_sync,
.resume = aurora_resume,
.flush_all = aurora_flush_all,
.disable = aurora_disable,
.sync = aurora_cache_sync,
.resume = l2c_resume,
},
};
@@ -1414,7 +1437,7 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.fixup = aurora_fixup,
.save = aurora_save,
.outer_cache = {
.resume = aurora_resume,
.resume = l2c_resume,
},
};
@@ -1562,6 +1585,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.save = l2c310_save,
.configure = l2c310_configure,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
@@ -1583,18 +1607,12 @@ static void __init tauros3_save(void __iomem *base)
readl_relaxed(base + L310_PREFETCH_CTRL);
}
static void tauros3_resume(void)
static void tauros3_configure(void __iomem *base)
{
void __iomem *base = l2x0_base;
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
base + TAUROS3_AUX2_CTRL);
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
base + L310_PREFETCH_CTRL);
l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
}
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
base + TAUROS3_AUX2_CTRL);
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
base + L310_PREFETCH_CTRL);
}
static const struct l2c_init_data of_tauros3_data __initconst = {
@@ -1603,9 +1621,10 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
.num_lock = 8,
.enable = l2c_enable,
.save = tauros3_save,
.configure = tauros3_configure,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
.resume = tauros3_resume,
.resume = l2c_resume,
},
};
@@ -1661,6 +1680,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
if (!of_property_read_bool(np, "cache-unified"))
pr_err("L2C: device tree omits to specify unified cache\n");
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
/* L2 configuration can only be changed if the cache is disabled */
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
if (data->of_parse)
@@ -1671,8 +1694,6 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
__l2c_init(data, aux_val, aux_mask, cache_id);
return 0;
return __l2c_init(data, aux_val, aux_mask, cache_id);
}
#endif

View file

@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
for_each_possible_cpu(i) {
if (i == cpu) {
asid = 0;
} else {
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
/*
* If this CPU has already been through a
* rollover, but hasn't run another task in
* the meantime, we must preserve its reserved
* ASID, as this is the only trace we have of
* the process it is still running.
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
__set_bit(asid & ~ASID_MASK, asid_map);
}
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
/*
* If this CPU has already been through a
* rollover, but hasn't run another task in
* the meantime, we must preserve its reserved
* ASID, as this is the only trace we have of
* the process it is still running.
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
__set_bit(asid & ~ASID_MASK, asid_map);
per_cpu(reserved_asids, i) = asid;
}

View file

@@ -2025,6 +2025,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
if (!mapping)
return;
arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}

View file

@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
if (addr < USER_PGTABLES_CEILING)
return;
if (!st->level) {
st->level = level;
st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
pgd_t *pgd = swapper_pg_dir;
struct pg_state st;
unsigned long addr;
unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
unsigned i;
memset(&st, 0, sizeof(st));
st.seq = m;
st.marker = address_markers;
pgd += pgdoff;
for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);

View file

@@ -319,10 +319,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
early_init_fdt_scan_reserved_mem();
/*
* reserve memory for DMA contigouos allocations,
* must come from DMA area inside low memory
*/
/* reserve memory for DMA contiguous allocations */
dma_contiguous_reserve(arm_dma_limit);
arm_memblock_steal_permitted = false;
@@ -658,8 +655,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
.mask = ~PMD_SECT_RDONLY,
.prot = PMD_SECT_RDONLY,
.mask = ~L_PMD_SECT_RDONLY,
.prot = L_PMD_SECT_RDONLY,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,

View file

@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
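
The type change in the last hunk matters on LPAE, where physical addresses are 64-bit but a 32-bit unsigned long would silently truncate them. A standalone illustration (not kernel code, values hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pa = 0x100008000ULL;		/* a kernel placed above 4 GiB */
	uint32_t as_ulong = (uint32_t)pa;	/* 32-bit unsigned long on ARM */

	printf("phys_addr_t:        %#llx\n", (unsigned long long)pa);
	printf("unsigned long (32): %#x\n", as_ulong);	/* 0x8000: top bits lost */
	return 0;
}
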