ANDROID: arm64/mm: Add command line option to make ZONE_DMA32 empty

ZONE_DMA32 is enabled by default on android12-5.10, but not every device
needs it, and it is undesirable to carry when unused. For instance, if a
partner did not use ZONE_DMA32 under GKI 1.0, enabling it now leaves
ZONE_NORMAL with less memory than on older targets, so ZONE_NORMAL is
exhausted more quickly and kswapd is invoked unnecessarily.

Accordingly, provide a means of making ZONE_DMA32 empty via the kernel
command line on kernels where it is compiled in via CONFIG_ZONE_DMA32.
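
For illustration, on a kernel built with CONFIG_ZONE_DMA32=y, appending

    disable_dma32=on

to the kernel command line empties the zone at boot; any other value, or
omitting the parameter, keeps the default behaviour. How the argument
reaches the kernel (bootloader, bootconfig, etc.) is target-specific.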

Bug: 199917449
Change-Id: I70ec76914b92e518d61a61072f0b3cb41cb28646
Signed-off-by: Chris Goldsworthy <quic_cgoldswo@quicinc.com>
Author: Chris Goldsworthy
Date:   2021-10-05 23:02:29 -07:00
Parent: f8f6c7332b
Commit: c3c2bb34ac
5 changed files with 68 additions and 8 deletions

arch/arm64/mm/init.c

@@ -62,6 +62,12 @@ EXPORT_SYMBOL(memstart_addr);
  */
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
+/*
+ * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
+ * CONFIG_ZONE_DMA32.
+ */
+static bool disable_dma32 __ro_after_init;
+
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
@@ -207,7 +213,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+	max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : PFN_DOWN(dma32_phys_limit);
 	if (!arm64_dma_phys_limit)
 		arm64_dma_phys_limit = dma32_phys_limit;
 #endif
@@ -218,6 +224,18 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	free_area_init(max_zone_pfns);
 }
 
+static int __init early_disable_dma32(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	if (!strcmp(buf, "on"))
+		disable_dma32 = true;
+
+	return 0;
+}
+early_param("disable_dma32", early_disable_dma32);
+
 int pfn_valid(unsigned long pfn)
 {
 	phys_addr_t addr = pfn << PAGE_SHIFT;
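
Note: with the hunks above applied, booting with disable_dma32=on forces
max_zone_pfns[ZONE_DMA32] to zero before free_area_init(), so the zone is
created without any pages; the DMA32 section of /proc/zoneinfo should then
show spanned/present/managed counts of 0.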

arch/x86/mm/init.c

@@ -102,6 +102,12 @@ static unsigned long min_pfn_mapped;
 static bool __initdata can_use_brk_pgt = true;
 
+/*
+ * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
+ * CONFIG_ZONE_DMA32.
+ */
+static bool disable_dma32 __ro_after_init;
+
 /*
  * Pages returned are already directly mapped.
  *
@@ -996,7 +1002,7 @@ void __init zone_sizes_init(void)
 	max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn);
+	max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : min(MAX_DMA32_PFN, max_low_pfn);
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
@@ -1006,6 +1012,18 @@ void __init zone_sizes_init(void)
 	free_area_init(max_zone_pfns);
 }
 
+static int __init early_disable_dma32(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	if (!strcmp(buf, "on"))
+		disable_dma32 = true;
+
+	return 0;
+}
+early_param("disable_dma32", early_disable_dma32);
+
 __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
 	.loaded_mm = &init_mm,
 	.next_asid = 1,

include/linux/dma-direct.h

@@ -24,6 +24,28 @@ struct bus_dma_region {
 	u64		offset;
 };
 
+static inline bool zone_dma32_is_empty(int node)
+{
+#ifdef CONFIG_ZONE_DMA32
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	return zone_is_empty(&pgdat->node_zones[ZONE_DMA32]);
+#else
+	return true;
+#endif
+}
+
+static inline bool zone_dma32_are_empty(void)
+{
+	int node;
+
+	for_each_node(node)
+		if (!zone_dma32_is_empty(node))
+			return false;
+
+	return true;
+}
+
 static inline dma_addr_t translate_phys_to_dma(struct device *dev,
 		phys_addr_t paddr)
 {
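
The kernel/dma hunks below consult these helpers before choosing GFP_DMA32.
As a minimal sketch of the calling pattern (pick_low4g_gfp() is hypothetical,
not part of the patch):

    #include <linux/device.h>
    #include <linux/dma-direct.h>
    #include <linux/gfp.h>

    /*
     * Hypothetical caller: choose a GFP mask for a device that can only
     * address the low 4 GiB.  If the device's node has an empty ZONE_DMA32
     * (e.g. booted with disable_dma32=on), GFP_DMA32 could never be
     * satisfied, so fall back to an unrestricted allocation.
     */
    static gfp_t pick_low4g_gfp(struct device *dev)
    {
            if (!zone_dma32_is_empty(dev_to_node(dev)))
                    return GFP_DMA32;
            return 0;
    }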

kernel/dma/direct.c

@@ -61,7 +61,8 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	*phys_limit = dma_to_phys(dev, dma_limit);
 	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
-	if (*phys_limit <= DMA_BIT_MASK(32))
+	if (*phys_limit <= DMA_BIT_MASK(32) &&
+	    !zone_dma32_is_empty(dev_to_node(dev)))
 		return GFP_DMA32;
 	return 0;
 }
@@ -101,7 +102,8 @@ again:
 	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 	    phys_limit < DMA_BIT_MASK(64) &&
-	    !(gfp & (GFP_DMA32 | GFP_DMA))) {
+	    !(gfp & (GFP_DMA32 | GFP_DMA)) &&
+	    !zone_dma32_is_empty(node)) {
 		gfp |= GFP_DMA32;
 		goto again;
 	}
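
Taken together, the two hunks above mean that on a disable_dma32=on system a
device whose physical limit is below 4 GiB gets a GFP mask of 0 rather than
GFP_DMA32, and the retry path never ORs GFP_DMA32 back in, so the allocation
is served from ZONE_NORMAL (or ZONE_DMA, where configured) instead of being
aimed at an empty zone.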

kernel/dma/pool.c

@@ -74,7 +74,7 @@ static bool cma_in_zone(gfp_t gfp)
 	end = cma_get_base(cma) + size - 1;
 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
 		return end <= DMA_BIT_MASK(zone_dma_bits);
-	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())
 		return end <= DMA_BIT_MASK(32);
 	return true;
 }
@@ -156,7 +156,7 @@ static void atomic_pool_work_fn(struct work_struct *work)
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
 		atomic_pool_resize(atomic_pool_dma,
 				   GFP_KERNEL | GFP_DMA);
-	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && !zone_dma32_are_empty())
 		atomic_pool_resize(atomic_pool_dma32,
 				   GFP_KERNEL | GFP_DMA32);
 	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
@@ -212,7 +212,7 @@ static int __init dma_atomic_pool_init(void)
 		if (!atomic_pool_dma)
 			ret = -ENOMEM;
 	}
-	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && !zone_dma32_are_empty()) {
 		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
 				GFP_KERNEL | GFP_DMA32);
 		if (!atomic_pool_dma32)
@@ -227,7 +227,7 @@ postcore_initcall(dma_atomic_pool_init);
 static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
 	if (prev == NULL) {
-		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())
 			return atomic_pool_dma32;
 		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
 			return atomic_pool_dma;
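
The pool changes follow the same pattern: when every node's ZONE_DMA32 is
empty, the GFP_DMA32 atomic pool is neither created at init nor resized by
the pool worker, and dma_guess_pool() falls through to the remaining pools
rather than returning a DMA32 pool that was never populated.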