mm: convert totalram_pages and totalhigh_pages variables to atomic
totalram_pages and totalhigh_pages are made into static inline functions. The main motivation was that managed_page_count_lock handling was complicating things. It was discussed at length here, https://lore.kernel.org/patchwork/patch/995739/#1181785 So it seemed better to remove the lock and convert the variables to atomic, preventing potential store-to-read tearing as a bonus. [akpm@linux-foundation.org: coding style fixes] Link: http://lkml.kernel.org/r/1542090790-21750-4-git-send-email-arunks@codeaurora.org Signed-off-by: Arun KS <arunks@codeaurora.org> Suggested-by: Michal Hocko <mhocko@suse.com> Suggested-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: David Hildenbrand <david@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
@@ -105,9 +105,8 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned long totalhigh_pages __read_mostly;
|
||||
EXPORT_SYMBOL(totalhigh_pages);
|
||||
|
||||
atomic_long_t _totalhigh_pages __read_mostly;
|
||||
EXPORT_SYMBOL(_totalhigh_pages);
|
||||
|
||||
EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
|
||||
|
||||
|
@@ -420,7 +420,7 @@ static int __init hugepage_init(void)
|
||||
* where the extra memory used could hurt more than TLB overhead
|
||||
* is likely to save. The admin can still enable it through /sys.
|
||||
*/
|
||||
if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
|
||||
if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
|
||||
transparent_hugepage_flags = 0;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -237,7 +237,7 @@ void quarantine_reduce(void)
|
||||
* Update quarantine size in case of hotplug. Allocate a fraction of
|
||||
* the installed memory to quarantine minus per-cpu queue limits.
|
||||
*/
|
||||
total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
|
||||
total_size = (totalram_pages() << PAGE_SHIFT) /
|
||||
QUARANTINE_FRACTION;
|
||||
percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
|
||||
new_quarantine_size = (total_size < percpu_quarantines) ?
|
||||
|
@@ -1576,7 +1576,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
|
||||
|
||||
for (; cursor < end; cursor++) {
|
||||
memblock_free_pages(pfn_to_page(cursor), cursor, 0);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1978,7 +1978,7 @@ unsigned long __init memblock_free_all(void)
|
||||
reset_all_zones_managed_pages();
|
||||
|
||||
pages = free_low_memory_core_early();
|
||||
totalram_pages += pages;
|
||||
totalram_pages_add(pages);
|
||||
|
||||
return pages;
|
||||
}
|
||||
|
@@ -146,7 +146,7 @@ static void __meminit mm_compute_batch(void)
|
||||
s32 batch = max_t(s32, nr*2, 32);
|
||||
|
||||
/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
|
||||
memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);
|
||||
memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff);
|
||||
|
||||
vm_committed_as_batch = max_t(s32, memsized_batch, batch);
|
||||
}
|
||||
|
@@ -269,7 +269,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
|
||||
}
|
||||
|
||||
/* Default to all available memory */
|
||||
oc->totalpages = totalram_pages + total_swap_pages;
|
||||
oc->totalpages = totalram_pages() + total_swap_pages;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_NUMA))
|
||||
return CONSTRAINT_NONE;
|
||||
|
@@ -16,6 +16,7 @@
|
||||
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/pagemap.h>
|
||||
@@ -124,7 +125,8 @@ EXPORT_SYMBOL(node_states);
|
||||
/* Protect totalram_pages and zone->managed_pages */
|
||||
static DEFINE_SPINLOCK(managed_page_count_lock);
|
||||
|
||||
unsigned long totalram_pages __read_mostly;
|
||||
atomic_long_t _totalram_pages __read_mostly;
|
||||
EXPORT_SYMBOL(_totalram_pages);
|
||||
unsigned long totalreserve_pages __read_mostly;
|
||||
unsigned long totalcma_pages __read_mostly;
|
||||
|
||||
@@ -4747,11 +4749,11 @@ EXPORT_SYMBOL_GPL(si_mem_available);
|
||||
|
||||
void si_meminfo(struct sysinfo *val)
|
||||
{
|
||||
val->totalram = totalram_pages;
|
||||
val->totalram = totalram_pages();
|
||||
val->sharedram = global_node_page_state(NR_SHMEM);
|
||||
val->freeram = global_zone_page_state(NR_FREE_PAGES);
|
||||
val->bufferram = nr_blockdev_pages();
|
||||
val->totalhigh = totalhigh_pages;
|
||||
val->totalhigh = totalhigh_pages();
|
||||
val->freehigh = nr_free_highpages();
|
||||
val->mem_unit = PAGE_SIZE;
|
||||
}
|
||||
@@ -7077,10 +7079,10 @@ void adjust_managed_page_count(struct page *page, long count)
|
||||
{
|
||||
spin_lock(&managed_page_count_lock);
|
||||
atomic_long_add(count, &page_zone(page)->managed_pages);
|
||||
totalram_pages += count;
|
||||
totalram_pages_add(count);
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (PageHighMem(page))
|
||||
totalhigh_pages += count;
|
||||
totalhigh_pages_add(count);
|
||||
#endif
|
||||
spin_unlock(&managed_page_count_lock);
|
||||
}
|
||||
@@ -7123,9 +7125,9 @@ EXPORT_SYMBOL(free_reserved_area);
|
||||
void free_highmem_page(struct page *page)
|
||||
{
|
||||
__free_reserved_page(page);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
atomic_long_inc(&page_zone(page)->managed_pages);
|
||||
totalhigh_pages++;
|
||||
totalhigh_pages_inc();
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -7174,10 +7176,10 @@ void __init mem_init_print_info(const char *str)
|
||||
physpages << (PAGE_SHIFT - 10),
|
||||
codesize >> 10, datasize >> 10, rosize >> 10,
|
||||
(init_data_size + init_code_size) >> 10, bss_size >> 10,
|
||||
(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
|
||||
(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
|
||||
totalcma_pages << (PAGE_SHIFT - 10),
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
totalhigh_pages << (PAGE_SHIFT - 10),
|
||||
totalhigh_pages() << (PAGE_SHIFT - 10),
|
||||
#endif
|
||||
str ? ", " : "", str ? str : "");
|
||||
}
|
||||
|
@@ -109,13 +109,14 @@ struct shmem_falloc {
|
||||
#ifdef CONFIG_TMPFS
|
||||
static unsigned long shmem_default_max_blocks(void)
|
||||
{
|
||||
return totalram_pages / 2;
|
||||
return totalram_pages() / 2;
|
||||
}
|
||||
|
||||
static unsigned long shmem_default_max_inodes(void)
|
||||
{
|
||||
unsigned long nr_pages = totalram_pages;
|
||||
return min(nr_pages - totalhigh_pages, nr_pages / 2);
|
||||
unsigned long nr_pages = totalram_pages();
|
||||
|
||||
return min(nr_pages - totalhigh_pages(), nr_pages / 2);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -3302,7 +3303,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
|
||||
size = memparse(value,&rest);
|
||||
if (*rest == '%') {
|
||||
size <<= PAGE_SHIFT;
|
||||
size *= totalram_pages;
|
||||
size *= totalram_pages();
|
||||
do_div(size, 100);
|
||||
rest++;
|
||||
}
|
||||
|
@@ -1235,7 +1235,7 @@ void __init kmem_cache_init(void)
|
||||
* page orders on machines with more than 32MB of memory if
|
||||
* not overridden on the command line.
|
||||
*/
|
||||
if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
|
||||
if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
|
||||
slab_max_order = SLAB_MAX_ORDER_HI;
|
||||
|
||||
/* Bootstrap is tricky, because several objects are allocated
|
||||
|
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
|
||||
*/
|
||||
void __init swap_setup(void)
|
||||
{
|
||||
unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
|
||||
unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
|
||||
|
||||
/* Use a smaller cluster for small-memory machines */
|
||||
if (megs < 16)
|
||||
|
@@ -593,7 +593,7 @@ unsigned long vm_commit_limit(void)
|
||||
if (sysctl_overcommit_kbytes)
|
||||
allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
|
||||
else
|
||||
allowed = ((totalram_pages - hugetlb_total_pages())
|
||||
allowed = ((totalram_pages() - hugetlb_total_pages())
|
||||
* sysctl_overcommit_ratio / 100);
|
||||
allowed += total_swap_pages;
|
||||
|
||||
|
@@ -1634,7 +1634,7 @@ void *vmap(struct page **pages, unsigned int count,
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (count > totalram_pages)
|
||||
if (count > totalram_pages())
|
||||
return NULL;
|
||||
|
||||
size = (unsigned long)count << PAGE_SHIFT;
|
||||
@@ -1739,7 +1739,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
|
||||
unsigned long real_size = size;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
|
||||
if (!size || (size >> PAGE_SHIFT) > totalram_pages())
|
||||
goto fail;
|
||||
|
||||
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
|
||||
|
@@ -549,7 +549,7 @@ static int __init workingset_init(void)
|
||||
* double the initial memory by using totalram_pages as-is.
|
||||
*/
|
||||
timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
|
||||
max_order = fls_long(totalram_pages - 1);
|
||||
max_order = fls_long(totalram_pages() - 1);
|
||||
if (max_order > timestamp_bits)
|
||||
bucket_order = max_order - timestamp_bits;
|
||||
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
|
||||
|
@@ -219,8 +219,8 @@ static const struct zpool_ops zswap_zpool_ops = {
|
||||
|
||||
static bool zswap_is_full(void)
|
||||
{
|
||||
return totalram_pages * zswap_max_pool_percent / 100 <
|
||||
DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
|
||||
return totalram_pages() * zswap_max_pool_percent / 100 <
|
||||
DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
|
||||
}
|
||||
|
||||
static void zswap_update_total_size(void)
|
||||
|
Reference in New Issue
Block a user