Merge 449dc8c970 ("Merge tag 'for-v5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply") into android-mainline
Merges along the way to 5.9-rc1 resolve conflicts in:
	Documentation/ABI/testing/sysfs-class-power
	drivers/power/supply/power_supply_sysfs.c
	fs/crypto/inline_crypt.c

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia087834f54fb4e5269d68c3c404747ceed240701
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,12 +88,10 @@ static inline bool is_migrate_movable(int mt)
 
 extern int page_group_by_mobility_disabled;
 
-#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
-#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
 
 #define get_pageblock_migratetype(page) \
-	get_pfnblock_flags_mask(page, page_to_pfn(page), \
-			PB_migrate_end, MIGRATETYPE_MASK)
+	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
 
 struct free_area {
 	struct list_head free_list[MIGRATE_TYPES];
@@ -155,10 +153,6 @@ enum zone_stat_item {
 	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_KERNEL_STACK_KB,	/* measured in KiB */
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	NR_KERNEL_SCS_KB,	/* measured in KiB */
-#endif
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
 	NR_ZSPAGES,		/* allocated in zsmalloc */
@@ -172,8 +166,8 @@ enum node_stat_item {
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE_B,
+	NR_SLAB_UNRECLAIMABLE_B,
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_NODES,
@@ -201,9 +195,33 @@ enum node_stat_item {
 	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
 	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
 	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
+	NR_KERNEL_STACK_KB,	/* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	NR_KERNEL_SCS_KB,	/* measured in KiB */
+#endif
 	NR_VM_NODE_STAT_ITEMS
 };
 
+/*
+ * Returns true if the value is measured in bytes (most vmstat values are
+ * measured in pages). This defines the API part, the internal representation
+ * might be different.
+ */
+static __always_inline bool vmstat_item_in_bytes(int idx)
+{
+	/*
+	 * Global and per-node slab counters track slab pages.
+	 * It's expected that changes are multiples of PAGE_SIZE.
+	 * Internally values are stored in pages.
+	 *
+	 * Per-memcg and per-lruvec counters track memory, consumed
+	 * by individual slab objects. These counters are actually
+	 * byte-precise.
+	 */
+	return (idx == NR_SLAB_RECLAIMABLE_B ||
+		idx == NR_SLAB_UNRECLAIMABLE_B);
+}
+
 /*
  * We do arithmetic on the LRU lists in various places in the code,
  * so it is important to keep the active lists LRU_ACTIVE higher in
@@ -817,18 +835,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
 
 extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
 
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#if defined(CONFIG_SPARSEMEM)
-void memblocks_present(void);
-#else
-static inline void memblocks_present(void) {}
-#endif
-
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 int local_memory_node(int node_id);
 #else
@@ -1385,8 +1391,6 @@ struct mminit_pfnnid_cache {
 #define early_pfn_valid(pfn)	(1)
 #endif
 
-void memory_present(int nid, unsigned long start, unsigned long end);
-
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
  * need to check pfn validity within that MAX_ORDER_NR_PAGES block.