Merge branch 'android12-5.10' into android12-5.10-lts

Sync up with android12-5.10 for the following commits:

1419b69403 Merge tag 'android12-5.10.101_r00' into android12-5.10
3eec441822 UPSTREAM: usb: gadget: Fix use-after-free bug by not setting udc->dev.driver
821f3e53d9 UPSTREAM: usb: gadget: rndis: prevent integer overflow in rndis_set_response()
39aca15979 FROMGIT: mm/migrate: fix race between lock page and clear PG_Isolated
de0334216b UPSTREAM: arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
d236f7b4cb UPSTREAM: arm64: Use the clearbhb instruction in mitigations
98b16e808f UPSTREAM: KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
0f76dfc55d UPSTREAM: arm64: Mitigate spectre style branch history side channels
5411474f65 UPSTREAM: arm64: Do not include __READ_ONCE() block in assembly files
e9a39a642c UPSTREAM: KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
fee1ae7c7c UPSTREAM: arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
d95b0b4e5d UPSTREAM: arm64: Add percpu vectors for EL1
30180ef431 Revert "BACKPORT: FROMLIST: scsi: core: Reserve one tag for the UFS driver"
28837e415d UPSTREAM: arm64: entry: Add macro for reading symbol addresses from the trampoline
e322fe26a1 UPSTREAM: arm64: entry: Add vectors that have the bhb mitigation sequences
2a90cf9af2 UPSTREAM: arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
0db372ec4b UPSTREAM: arm64: entry: Allow the trampoline text to occupy multiple pages
158c87e50a UPSTREAM: arm64: entry: Make the kpti trampoline's kpti sequence optional
e6408b96a0 UPSTREAM: arm64: entry: Move trampoline macros out of ifdef'd section
00d8bb6b90 UPSTREAM: arm64: entry: Don't assume tramp_vectors is the start of the vectors
0defb52ce6 UPSTREAM: arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
6e48449c91 UPSTREAM: arm64: entry: Move the trampoline data page before the text page
bb8baaf3af UPSTREAM: arm64: entry: Free up another register on kpti's tramp_exit path
32ba6d5d61 UPSTREAM: arm64: entry: Make the trampoline cleanup optional
c8b567d888 UPSTREAM: arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
17867c11a2 UPSTREAM: arm64: entry.S: Add ventry overflow sanity checks
81ec26aafe UPSTREAM: arm64: cpufeature: add HWCAP for FEAT_RPRES
18c4e4fa56 UPSTREAM: arm64: cpufeature: add HWCAP for FEAT_AFP
68bc555a23 UPSTREAM: arm64: add ID_AA64ISAR2_EL1 sys register
2e2eef400b UPSTREAM: arm64: Add HWCAP for self-synchronising virtual counter
6d1f2678e2 UPSTREAM: arm64: Add Cortex-X2 CPU part definition
51eded5d1b UPSTREAM: arm64: cputype: Add CPU implementor & types for the Apple M1 cores
803ff1161c UPSTREAM: binder: Add invalid handle info in user error log
a40cd23755 UPSTREAM: ARM: fix Thumb2 regression with Spectre BHB
56186c7e4a UPSTREAM: ARM: Spectre-BHB: provide empty stub for non-config
1ea0d91588 UPSTREAM: ARM: fix build warning in proc-v7-bugs.c
fdf3cb8a00 UPSTREAM: ARM: Do not use NOCROSSREFS directive with ld.lld
57bc1e13f0 UPSTREAM: ARM: fix co-processor register typo
a4e68d43f5 UPSTREAM: ARM: fix build error when BPF_SYSCALL is disabled
bd2376838d UPSTREAM: ARM: include unprivileged BPF status in Spectre V2 reporting
afbbe4048f UPSTREAM: ARM: Spectre-BHB workaround
5a41f364e7 UPSTREAM: ARM: use LOADADDR() to get load address of sections
3bfcb356df UPSTREAM: ARM: early traps initialisation
5a64a66802 UPSTREAM: ARM: report Spectre v2 status through sysfs
9362cd2b47 UPSTREAM: x86/speculation: Warn about eIBRS + LFENCE + Unprivileged eBPF + SMT
54a2bd029f UPSTREAM: x86/speculation: Warn about Spectre v2 LFENCE mitigation
f1b1f893b4 UPSTREAM: x86/speculation: Update link to AMD speculation whitepaper
c4188388a3 UPSTREAM: x86/speculation: Use generic retpoline by default on AMD
bd02dc4329 UPSTREAM: x86/speculation: Include unprivileged eBPF status in Spectre v2 mitigation reporting
3883503747 UPSTREAM: Documentation/hw-vuln: Update spectre doc
1c3e98581b UPSTREAM: x86/speculation: Add eIBRS + Retpoline options
cc9e9aa4e0 UPSTREAM: x86/speculation: Rename RETPOLINE_AMD to RETPOLINE_LFENCE
414a6076ac UPSTREAM: x86,bugs: Unconditionally allow spectre_v2=retpoline,amd
f27f62fecd UPSTREAM: bpf: Add kconfig knob for disabling unpriv bpf by default
f3ca80cced ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree
4ebb639f0d ANDROID: mm: page_pinner: fix build warning
fe75d58387 ANDROID: fault: Add vendor hook for TLB conflict
8248a3e758 BACKPORT: sched: Fix yet more sched_fork() races
cd6e5d5d7d ANDROID: mm/slub: Fix Kasan issue with for_each_object_track
8dbcaf63b2 ANDROID: dm kcopyd: Use reserved memory for the copy buffer
7b5fea2f46 ANDROID: GKI: add allowed list file for xiaomi
ae38f9954b ANDROID: GKI: Update symbols to symbol list
786bcb1109 FROMGIT: f2fs: quota: fix loop condition at f2fs_quota_sync()
91fef75d48 FROMGIT: f2fs: Restore rwsem lockdep support
4cc8ec84be ANDROID: ABI: update allowed list for galaxy
fcaaaaae6d UPSTREAM: mac80211_hwsim: initialize ieee80211_tx_info at hw_scan_work
91be4236fb ANDROID: GKI: remove vfs-only namespace from 2 symbols
a817d6ed87 ANDROID: mm: Fix page table lookup in speculative fault path
e53b1b9ad4 UPSTREAM: xhci: re-initialize the HC during resume if HCE was set
767f384155 FROMGIT: xhci: make xhci_handshake timeout for xhci_reset() adjustable
ebbf267fc0 ANDROID: vendor_hooks: Add hooks for __alloc_pages_direct_reclaim
135406cecb ANDROID: dma-direct: Document disable_dma32
bf96382fb9 ANDROID: dma-direct: Make DMA32 disablement work for CONFIG_NUMA
8f66dc1a78 UPSTREAM: mmc: block: fix read single on recovery logic
cf221db753 UPSTREAM: fget: check that the fd still exists after getting a ref to it
43754d8b7f ANDROID: GKI: Update symbols to symbol list
f2d0c30576 ANDROID: vendor_hooks: Add hooks for shrink_active_list
62412e5b8c FROMGIT: mm: count time in drain_all_pages during direct reclaim as memory pressure
3b9fe10e46 ANDROID: incremental-fs: remove spurious kfree()
acefa91e51 ANDROID: vendor_hooks: Add hooks for binder
c3ac7418e6 ANDROID: qcom: Add sysfs related symbol

Change-Id: Icbe5fb26e3cef602e3bbc01745a755a95d72a1a0
Greg Kroah-Hartman, 2022-03-21 16:48:34 +01:00

43 changed files with 1803 additions and 1506 deletions

View File

@@ -939,6 +939,10 @@
 			can be useful when debugging issues that require an SLB
 			miss to occur.
 
+	disable_dma32=	[KNL]
+			Dynamically disable ZONE_DMA32 on kernels compiled with
+			CONFIG_ZONE_DMA32=y.
+
 	stress_slb	[PPC]
 			Limits the number of kernel SLB entries, and flushes
 			them frequently to increase the rate of SLB faults

File diff suppressed because it is too large

View File

@@ -343,6 +343,7 @@
   __traceiter_dwc3_readl
   __traceiter_dwc3_writel
   __traceiter_gpu_mem_total
+  __traceiter_kfree_skb
   __traceiter_sched_util_est_se_tp
   __traceiter_xdp_exception
   __tracepoint_android_rvh_account_irq
@@ -494,6 +495,7 @@
   __tracepoint_ipi_raise
   __tracepoint_irq_handler_entry
   __tracepoint_irq_handler_exit
+  __tracepoint_kfree_skb
   __tracepoint_pelt_cfs_tp
   __tracepoint_pelt_dl_tp
   __tracepoint_pelt_irq_tp
@@ -4278,6 +4280,7 @@
   usb_hcd_start_port_resume
   usb_hcd_unlink_urb_from_ep
   usb_hcds_loaded
+  usb_hid_driver
   usb_hub_clear_tt_buffer
   usb_hub_find_child
   usb_ifnum_to_if

View File

@@ -2692,6 +2692,8 @@
   __traceiter_android_vh_tune_inactive_ratio
   __traceiter_android_vh_tune_scan_type
   __traceiter_android_vh_tune_swappiness
+  __traceiter_android_vh_page_referenced_check_bypass
+  __traceiter_android_vh_drain_all_pages_bypass
   __traceiter_android_vh_ufs_compl_command
   __traceiter_android_vh_ufs_send_command
   __traceiter_android_vh_ufs_send_tm_command
@@ -2894,6 +2896,8 @@
   __tracepoint_android_vh_tune_inactive_ratio
   __tracepoint_android_vh_tune_scan_type
   __tracepoint_android_vh_tune_swappiness
+  __tracepoint_android_vh_page_referenced_check_bypass
+  __tracepoint_android_vh_drain_all_pages_bypass
   __tracepoint_android_vh_ufs_compl_command
   __tracepoint_android_vh_ufs_send_command
   __tracepoint_android_vh_ufs_send_tm_command

View File

@@ -2430,6 +2430,7 @@
   sysfs_create_groups
   sysfs_create_link
   sysfs_emit
+  sysfs_group_change_owner
   __sysfs_match_string
   sysfs_notify
   sysfs_remove_bin_file
@@ -2687,6 +2688,7 @@
   __tracepoint_android_vh_ftrace_size_check
   __tracepoint_android_vh_gic_resume
   __tracepoint_android_vh_gpio_block_read
+  __tracepoint_android_vh_handle_tlb_conf
   __tracepoint_android_vh_iommu_setup_dma_ops
   __tracepoint_android_vh_ipi_stop
   __tracepoint_android_vh_jiffies_update

View File

@@ -194,3 +194,9 @@
 #extend_reclaim.ko
   try_to_free_mem_cgroup_pages
+
+##required by xm_power_debug.ko module
+  wakeup_sources_read_lock
+  wakeup_sources_read_unlock
+  wakeup_sources_walk_start
+  wakeup_sources_walk_next

View File

@@ -112,6 +112,7 @@
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
 #define ESR_ELx_FSC_PERM	(0x0C)
+#define ESR_ELx_FSC_TLBCONF	(0x30)
 
 /* ISS field definitions for Data Aborts */
 #define ESR_ELx_ISV_SHIFT	(24)

View File

@@ -711,7 +711,11 @@ static int do_alignment_fault(unsigned long far, unsigned int esr,
 
 static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs)
 {
-	return 1; /* "fault" */
+	unsigned long addr = untagged_addr(far);
+	int ret = 1;
+
+	trace_android_vh_handle_tlb_conf(addr, esr, &ret);
+	return ret;
 }
 
 static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)

View File

@@ -2490,6 +2490,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
 	struct binder_priority node_prio;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
 	bool pending_async = false;
+	bool skip = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
@@ -2517,7 +2518,10 @@ static int binder_proc_transaction(struct binder_transaction *t,
 		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
 	}
 
-	if (!thread && !pending_async)
+	trace_android_vh_binder_proc_transaction_entry(proc, t,
+		&thread, node->debug_id, pending_async, !oneway, &skip);
+
+	if (!thread && !pending_async && !skip)
 		thread = binder_select_thread_ilocked(proc);
 
 	trace_android_vh_binder_proc_transaction(current, proc->tsk,
@@ -2701,8 +2705,8 @@ static void binder_transaction(struct binder_proc *proc,
 						  ref->node, &target_proc,
 						  &return_error);
 		} else {
-			binder_user_error("%d:%d got transaction to invalid handle\n",
-					  proc->pid, thread->pid);
+			binder_user_error("%d:%d got transaction to invalid handle, %u\n",
+					  proc->pid, thread->pid, tr->target.handle);
 			return_error = BR_FAILED_REPLY;
 		}
 		binder_proc_unlock(proc);
@@ -4032,6 +4036,10 @@ retry:
 	size_t trsize = sizeof(*trd);
 
 	binder_inner_proc_lock(proc);
+	trace_android_vh_binder_select_worklist_ilocked(&list, thread,
+				proc, wait_for_proc_work);
+	if (list)
+		goto skip;
 	if (!binder_worklist_empty_ilocked(&thread->todo))
 		list = &thread->todo;
 	else if (!binder_worklist_empty_ilocked(&proc->todo) &&
@@ -4045,7 +4053,7 @@ retry:
 			goto retry;
 		break;
 	}
-
+skip:
 	if (end - ptr < sizeof(tr) + 4) {
 		binder_inner_proc_unlock(proc);
 		break;

View File

@@ -277,8 +277,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_binder_transaction);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_select_worklist_ilocked);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
@@ -289,6 +291,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
@@ -388,3 +392,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_proc);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_thread_release);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_has_work_ilocked);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);

View File

@@ -236,6 +236,7 @@ static void set_type(struct bow_context *bc, struct bow_range **br, int type)
 
 	(*br)->type = type;
 
+	mutex_lock(&bc->ranges_lock);
 	if (next->type == type) {
 		if (type == TRIMMED)
 			list_del(&next->trimmed_list);
@@ -249,6 +250,7 @@ static void set_type(struct bow_context *bc, struct bow_range **br, int type)
 		rb_erase(&(*br)->node, &bc->ranges);
 		kfree(*br);
 	}
+	mutex_unlock(&bc->ranges_lock);
 
 	*br = NULL;
 }
@@ -599,6 +601,7 @@ static void dm_bow_dtr(struct dm_target *ti)
 	struct bow_context *bc = (struct bow_context *) ti->private;
 	struct kobject *kobj;
 
+	mutex_lock(&bc->ranges_lock);
 	while (rb_first(&bc->ranges)) {
 		struct bow_range *br = container_of(rb_first(&bc->ranges),
 						    struct bow_range, node);
@@ -606,6 +609,8 @@ static void dm_bow_dtr(struct dm_target *ti)
 		rb_erase(&br->node, &bc->ranges);
 		kfree(br);
 	}
+	mutex_unlock(&bc->ranges_lock);
+
 	if (bc->workqueue)
 		destroy_workqueue(bc->workqueue);
 	if (bc->bufio)
@@ -1181,6 +1186,7 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
 		return;
 	}
 
+	mutex_lock(&bc->ranges_lock);
 	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
 		struct bow_range *br = container_of(i, struct bow_range, node);
 
@@ -1188,11 +1194,11 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
 				   readable_type[br->type],
 				   (unsigned long long)br->sector);
 		if (result >= end)
-			return;
+			goto unlock;
 
 		result += scnprintf(result, end - result, "\n");
 		if (result >= end)
-			return;
+			goto unlock;
 
 		if (br->type == TRIMMED)
 			++trimmed_range_count;
@@ -1214,19 +1220,22 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
 		if (!rb_next(i)) {
 			scnprintf(result, end - result,
 				  "\nERROR: Last range not of type TOP");
-			return;
+			goto unlock;
 		}
 		if (br->sector > range_top(br)) {
 			scnprintf(result, end - result,
 				  "\nERROR: sectors out of order");
-			return;
+			goto unlock;
 		}
 	}
 
 	if (trimmed_range_count != trimmed_list_length)
 		scnprintf(result, end - result,
 			  "\nERROR: not all trimmed ranges in trimmed list");
+
+unlock:
+	mutex_unlock(&bc->ranges_lock);
 }
 
 static void dm_bow_status(struct dm_target *ti, status_type_t type,

View File

@@ -17,6 +17,8 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -39,6 +41,105 @@ static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
 module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
 
+static bool rsm_enabled;
+static phys_addr_t rsm_mem_base, rsm_mem_size;
+
+#ifndef MODULE
+static DEFINE_SPINLOCK(rsm_lock);
+static int *rsm_mem;
+static int rsm_page_cnt;
+static int rsm_tbl_idx;
+static struct reserved_mem *rmem;
+
+static void __init kcopyd_rsm_init(void)
+{
+	static struct device_node *rsm_node;
+	int ret = 0;
+
+	if (!rsm_enabled)
+		return;
+
+	rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
+	if (!rsm_node) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	rmem = of_reserved_mem_lookup(rsm_node);
+	if (!rmem) {
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+	rsm_mem_base = rmem->base;
+	rsm_mem_size = rmem->size;
+	rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
+	rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
+	if (!rsm_mem)
+		ret = -ENOMEM;
+
+out_put_node:
+	of_node_put(rsm_node);
+out:
+	if (ret)
+		pr_warn("kcopyd: failed to init rsm: %d", ret);
+}
+
+static int __init kcopyd_rsm_enable(char *str)
+{
+	rsm_enabled = true;
+	return 0;
+}
+early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);
+
+static void kcopyd_rsm_get_page(struct page **p)
+{
+	int i;
+	unsigned long flags;
+
+	*p = NULL;
+	spin_lock_irqsave(&rsm_lock, flags);
+	for (i = 0 ; i < rsm_page_cnt ; i++) {
+		rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;
+		if (rsm_mem[rsm_tbl_idx] == 0) {
+			rsm_mem[rsm_tbl_idx] = 1;
+			*p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE
+						       * rsm_tbl_idx));
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&rsm_lock, flags);
+}
+
+static void kcopyd_rsm_drop_page(struct page **p)
+{
+	u64 off;
+	unsigned long flags;
+
+	if (*p) {
+		off = page_to_phys(*p) - rsm_mem_base;
+		spin_lock_irqsave(&rsm_lock, flags);
+		rsm_mem[off >> PAGE_SHIFT] = 0;
+		spin_unlock_irqrestore(&rsm_lock, flags);
+		*p = NULL;
+	}
+}
+
+static void kcopyd_rsm_destroy(void)
+{
+	if (rsm_enabled)
+		kfree(rsm_mem);
+}
+
+#else
+#define kcopyd_rsm_destroy(...)
+#define kcopyd_rsm_drop_page(...)
+#define kcopyd_rsm_get_page(...)
+#define kcopyd_rsm_init(...)
+#endif
+
 static unsigned dm_get_kcopyd_subjob_size(void)
 {
 	unsigned sub_job_size_kb;
@@ -211,7 +312,7 @@ static void wake(struct dm_kcopyd_client *kc)
 /*
  * Obtain one page for the use of kcopyd.
  */
-static struct page_list *alloc_pl(gfp_t gfp)
+static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
 {
 	struct page_list *pl;
 
@@ -219,7 +320,12 @@ static struct page_list *alloc_pl(gfp_t gfp)
 	if (!pl)
 		return NULL;
 
-	pl->page = alloc_page(gfp);
+	if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags)) {
+		kcopyd_rsm_get_page(&pl->page);
+	} else {
+		pl->page = alloc_page(gfp);
+	}
+
 	if (!pl->page) {
 		kfree(pl);
 		return NULL;
@@ -230,7 +336,14 @@ static struct page_list *alloc_pl(gfp_t gfp)
 
 static void free_pl(struct page_list *pl)
 {
-	__free_page(pl->page);
+	struct page *p = pl->page;
+	phys_addr_t pa = page_to_phys(p);
+
+	if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
+		kcopyd_rsm_drop_page(&pl->page);
+	else
+		__free_page(pl->page);
+
 	kfree(pl);
 }
 
@@ -258,14 +371,15 @@ static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 }
 
 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
-			    unsigned int nr, struct page_list **pages)
+			    unsigned int nr, struct page_list **pages,
+			    unsigned long job_flags)
 {
 	struct page_list *pl;
 
 	*pages = NULL;
 
 	do {
-		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
 		if (unlikely(!pl)) {
 			/* Use reserved pages */
 			pl = kc->pages;
@@ -309,7 +423,7 @@ static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
 	struct page_list *pl = NULL, *next;
 
 	for (i = 0; i < nr_pages; i++) {
-		next = alloc_pl(GFP_KERNEL);
+		next = alloc_pl(GFP_KERNEL, 0);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
@@ -395,6 +509,8 @@ int __init dm_kcopyd_init(void)
 	zero_page_list.next = &zero_page_list;
 	zero_page_list.page = ZERO_PAGE(0);
 
+	kcopyd_rsm_init();
+
 	return 0;
 }
 
@@ -402,6 +518,7 @@ void dm_kcopyd_exit(void)
 {
 	kmem_cache_destroy(_job_cache);
 	_job_cache = NULL;
+	kcopyd_rsm_destroy();
 }
 
 /*
@@ -586,7 +703,7 @@ static int run_pages_job(struct kcopyd_job *job)
 	int r;
 	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
 
-	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
+	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
 	if (!r) {
 		/* this job is ready for io */
 		push(&job->kc->io_jobs, job);

View File

@@ -1117,7 +1117,8 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 	for (i = 0; i < linear_chunks; i++)
 		__check_for_conflicting_io(s, old_chunk + i);
 
-	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
+		       merge_callback, s);
 	return;
 
 shut:

View File

@@ -220,6 +220,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 		goto fail;
 	}
 
+	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
+				   shost->can_queue);
+
 	error = scsi_init_sense_cache(shost);
 	if (error)
 		goto fail;
@@ -228,12 +232,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	if (error)
 		goto fail;
 
-	shost->can_queue = shost->tag_set.queue_depth;
-
-	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
-	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
-				   shost->can_queue);
-
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
 	if (!dma_dev)

View File

@@ -1907,10 +1907,6 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 		tag_set->ops = &scsi_mq_ops_no_commit;
 	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
 	tag_set->queue_depth = shost->can_queue;
-	if (shost->hostt->name && strcmp(shost->hostt->name, "ufshcd") == 0) {
-		tag_set->queue_depth--;
-		tag_set->reserved_tags++;
-	}
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = NUMA_NO_NODE;
 	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;

View File

@@ -642,6 +642,7 @@ static int rndis_set_response(struct rndis_params *params,
 	BufLength = le32_to_cpu(buf->InformationBufferLength);
 	BufOffset = le32_to_cpu(buf->InformationBufferOffset);
 	if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+	    (BufOffset > RNDIS_MAX_TOTAL_SIZE) ||
 	    (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
 		    return -EINVAL;

View File

@@ -1437,7 +1437,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
 	usb_gadget_udc_stop(udc);
 
 	udc->driver = NULL;
-	udc->dev.driver = NULL;
 	udc->gadget->dev.driver = NULL;
 }
 
@@ -1499,7 +1498,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
 			driver->function);
 
 	udc->driver = driver;
-	udc->dev.driver = &driver->driver;
 	udc->gadget->dev.driver = &driver->driver;
 
 	usb_gadget_udc_set_speed(udc, driver->max_speed);
@@ -1522,7 +1520,6 @@ err1:
 		dev_err(&udc->dev, "failed to start %s: %d\n",
 			udc->driver->function, ret);
 	udc->driver = NULL;
-	udc->dev.driver = NULL;
 	udc->gadget->dev.driver = NULL;
 	return ret;
 }

View File

@@ -681,7 +681,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
 	}
 	pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
 	xhci->test_mode = 0;
-	return xhci_reset(xhci);
+	return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 }
 
 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,

View File

@@ -2695,7 +2695,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 fail:
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	xhci_mem_cleanup(xhci);
 	return -ENOMEM;
 }

View File

@@ -65,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
  * handshake done). There are two failure modes: "usec" have passed (major
  * hardware flakeout), or the register reads as all-ones (hardware removed).
  */
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
 {
 	u32	result;
 	int	ret;
@@ -73,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
 	ret = readl_poll_timeout_atomic(ptr, result,
 					(result & mask) == done ||
 					result == U32_MAX,
-					1, usec);
+					1, timeout_us);
 	if (result == U32_MAX)		/* card removed */
 		return -ENODEV;
 
@@ -162,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci)
  * Transactions will be terminated immediately, and operational registers
  * will be set to their defaults.
  */
-int xhci_reset(struct xhci_hcd *xhci)
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
 {
 	u32 command;
 	u32 state;
@@ -195,8 +195,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (xhci->quirks & XHCI_INTEL_HOST)
 		udelay(1000);
 
-	ret = xhci_handshake(&xhci->op_regs->command,
-			CMD_RESET, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
 	if (ret)
 		return ret;
 
@@ -209,8 +208,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
 	 */
-	ret = xhci_handshake(&xhci->op_regs->status,
-			STS_CNR, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
 
 	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
 	xhci->usb2_rhub.bus_state.suspended_ports = 0;
@@ -731,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd)
 	xhci->xhc_state |= XHCI_STATE_HALTED;
 	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -784,7 +782,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
 	xhci_halt(xhci);
 	/* Workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
-		xhci_reset(xhci);
+		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -1170,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_zero_64b_regs(xhci);
-		retval = xhci_reset(xhci);
+		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 		spin_unlock_irq(&xhci->lock);
 		if (retval)
 			return retval;
@@ -5282,7 +5280,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 
 	xhci_dbg(xhci, "Resetting HCD\n");
 	/* Reset the internal HC memory state and registers. */
-	retval = xhci_reset(xhci);
+	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 	if (retval)
 		return retval;
 	xhci_dbg(xhci, "Reset complete\n");

View File

@@ -230,6 +230,9 @@ struct xhci_op_regs {
 #define CMD_ETE		(1 << 14)
 /* bits 15:31 are reserved (and should be preserved on writes). */
 
+#define XHCI_RESET_LONG_USEC		(10 * 1000 * 1000)
+#define XHCI_RESET_SHORT_USEC		(250 * 1000)
+
 /* IMAN - Interrupt Management Register */
 #define IMAN_IE		(1 << 1)
 #define IMAN_IP		(1 << 0)
@@ -2097,11 +2100,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
 void xhci_quiesce(struct xhci_hcd *xhci);
 int xhci_halt(struct xhci_hcd *xhci);
 int xhci_start(struct xhci_hcd *xhci);
-int xhci_reset(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
 int xhci_run(struct usb_hcd *hcd);
 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_shutdown(struct usb_hcd *hcd);

View File

@@ -3189,7 +3189,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 	}
 	return ret;
 }
-EXPORT_SYMBOL_NS(__sync_dirty_buffer, ANDROID_GKI_VFS_EXPORT_ONLY);
+EXPORT_SYMBOL(__sync_dirty_buffer);
 
 int sync_dirty_buffer(struct buffer_head *bh)
 {

View File

@@ -2081,9 +2081,17 @@ static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
 	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
 }
 
-static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
+#define init_f2fs_rwsem(sem)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__init_f2fs_rwsem((sem), #sem, &__key);			\
+} while (0)
+
+static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
+		const char *sem_name, struct lock_class_key *key)
 {
-	init_rwsem(&sem->internal_rwsem);
+	__init_rwsem(&sem->internal_rwsem, sem_name, key);
 	init_waitqueue_head(&sem->read_waiters);
 }
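For readers outside f2fs, the shape of the change above is the usual trick for giving lockdep a distinct class per initialisation site: the wrapper macro places a static struct lock_class_key at every call site and forwards it, together with a name, to the low-level init. Below is a minimal standalone sketch of the same pattern with purely illustrative names (demo_ctx and demo_init_lock are not from this merge); it leans only on the stock __init_rwsem() helper.

#include <linux/rwsem.h>

struct demo_ctx {
	struct rw_semaphore lock;
};

/*
 * Each expansion of this macro gets its own static lock_class_key, so
 * lockdep treats every init site as a separate lock class instead of
 * collapsing all demo_ctx locks into one.
 */
#define demo_init_lock(ctx)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem(&(ctx)->lock, #ctx "->lock", &__key);	\
} while (0)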

View File

@@ -2525,7 +2525,7 @@ int f2fs_quota_sync(struct super_block *sb, int type)
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int cnt;
-	int ret;
+	int ret = 0;
 
 	/*
 	 * Now when everything is written we can discard the pagecache so
@@ -2536,8 +2536,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
 		if (type != -1 && cnt != type)
 			continue;
 
-		if (!sb_has_quota_active(sb, type))
-			return 0;
+		if (!sb_has_quota_active(sb, cnt))
+			continue;
 
 		inode_lock(dqopt->files[cnt]);

View File

@@ -175,7 +175,6 @@ void incfs_free_mount_info(struct mount_info *mi)
 		kfree(mi->pseudo_file_xattr[i].data);
 	kfree(mi->mi_per_uid_read_timeouts);
 	incfs_free_sysfs_node(mi->mi_sysfs_node);
-	kfree(mi->mi_options.sysfs_name);
 	kfree(mi);
 }

View File

@@ -2533,7 +2533,7 @@ int kern_path(const char *name, unsigned int flags, struct path *path)
 	return filename_lookup(AT_FDCWD, getname_kernel(name),
 			       flags, path, NULL);
 }
-EXPORT_SYMBOL_NS(kern_path, ANDROID_GKI_VFS_EXPORT_ONLY);
+EXPORT_SYMBOL(kern_path);
 
 /**
  * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair

View File

@@ -21,6 +21,7 @@
 
 #define DM_KCOPYD_IGNORE_ERROR 1
 #define DM_KCOPYD_WRITE_SEQ    2
+#define DM_KCOPYD_SNAP_MERGE   3
 
 struct dm_kcopyd_throttle {
 	unsigned throttle;

View File

@@ -37,11 +37,16 @@ static inline bool zone_dma32_is_empty(int node)
 
 static inline bool zone_dma32_are_empty(void)
 {
+#ifdef CONFIG_NUMA
 	int node;
 
 	for_each_node(node)
 		if (!zone_dma32_is_empty(node))
 			return false;
+#else
+	if (!zone_dma32_is_empty(numa_node_id()))
+		return false;
+#endif
 
 	return true;
 }

View File

@@ -795,7 +795,7 @@ PAGE_TYPE_OPS(Guard, guard)
 
 extern bool is_free_buddy_page(struct page *page);
 
-__PAGEFLAG(Isolated, isolated, PF_ANY);
+PAGEFLAG(Isolated, isolated, PF_ANY);
 
 /*
  * If network-based swap is enabled, sl*b must keep track of whether pages

View File

@@ -55,8 +55,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p,
-			    struct kernel_clone_args *kargs);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);

View File

@@ -59,6 +59,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_binder_transaction,
 DECLARE_HOOK(android_vh_binder_preset,
 	TP_PROTO(struct hlist_head *hhead, struct mutex *lock),
 	TP_ARGS(hhead, lock));
+DECLARE_HOOK(android_vh_binder_proc_transaction_entry,
+	TP_PROTO(struct binder_proc *proc, struct binder_transaction *t,
+		struct binder_thread **thread, int node_debug_id, bool pending_async,
+		bool sync, bool *skip),
+	TP_ARGS(proc, t, thread, node_debug_id, pending_async, sync, skip));
 DECLARE_HOOK(android_vh_binder_proc_transaction,
 	TP_PROTO(struct task_struct *caller_task, struct task_struct *binder_proc_task,
 		struct task_struct *binder_th_task, int node_debug_id,
@@ -69,6 +74,10 @@ DECLARE_HOOK(android_vh_binder_proc_transaction_end,
 		struct task_struct *binder_th_task, unsigned int code,
 		bool pending_async, bool sync),
 	TP_ARGS(caller_task, binder_proc_task, binder_th_task, code, pending_async, sync));
+DECLARE_HOOK(android_vh_binder_select_worklist_ilocked,
+	TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc,
+		int wait_for_proc_work),
+	TP_ARGS(list, thread, proc, wait_for_proc_work));
 DECLARE_HOOK(android_vh_binder_new_ref,
 	TP_PROTO(struct task_struct *proc, uint32_t ref_desc, int node_debug_id),
 	TP_ARGS(proc, ref_desc, node_debug_id));
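As a rough illustration of how a vendor module would attach to one of the hooks declared above, here is a hypothetical sketch that is not part of this merge. It assumes the register_trace_android_vh_*() helpers that DECLARE_HOOK() generates for Android vendor hooks; the probe body deliberately does nothing, which leaves binder's default worklist selection untouched (binder only bypasses it when the probe sets *list).

#include <linux/module.h>
#include <trace/hooks/binder.h>

/* Probe signature: registered data pointer first, then the TP_PROTO args. */
static void demo_select_worklist(void *data, struct list_head **list,
				 struct binder_thread *thread,
				 struct binder_proc *proc,
				 int wait_for_proc_work)
{
	/* Set *list here to hand binder a worklist of your own choosing;
	 * leaving it NULL keeps the driver's normal selection logic. */
}

static int __init demo_init(void)
{
	return register_trace_android_vh_binder_select_worklist_ilocked(
			demo_select_worklist, NULL);
}
module_init(demo_init);
MODULE_LICENSE("GPL");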

View File

@@ -29,6 +29,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_do_sp_pc_abort,
 	TP_ARGS(regs, esr, addr, user),
 	TP_CONDITION(!user));
 
+DECLARE_HOOK(android_vh_handle_tlb_conf,
+	TP_PROTO(unsigned long addr, unsigned int esr, int *ret),
+	TP_ARGS(addr, esr, ret));
+
 /* macro versions of hooks are no longer required */
 
 #endif /* _TRACE_HOOK_FAULT_H */

View File

@@ -117,6 +117,11 @@ DECLARE_HOOK(android_vh_mmap_region,
 DECLARE_HOOK(android_vh_try_to_unmap_one,
 	TP_PROTO(struct vm_area_struct *vma, struct page *page, unsigned long addr, bool ret),
 	TP_ARGS(vma, page, addr, ret));
+DECLARE_HOOK(android_vh_drain_all_pages_bypass,
+	TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags,
+		int migratetype, unsigned long did_some_progress,
+		bool *bypass),
+	TP_ARGS(gfp_mask, order, alloc_flags, migratetype, did_some_progress, bypass));
 struct device;
 DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
 	TP_PROTO(bool *allow_subpage_alloc, struct device *dev, size_t *size),

View File

@@ -25,6 +25,10 @@ DECLARE_HOOK(android_vh_tune_inactive_ratio,
 DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim,
 	TP_PROTO(bool *balance_anon_file_reclaim),
 	TP_ARGS(balance_anon_file_reclaim), 1);
+DECLARE_HOOK(android_vh_page_referenced_check_bypass,
+	TP_PROTO(struct page *page, unsigned long nr_to_scan, int lru, bool *bypass),
+	TP_ARGS(page, nr_to_scan, lru, bypass));
+
 #endif /* _TRACE_HOOK_VMSCAN_H */
 /* This part must be outside protection */
 #include <trace/define_trace.h>

View File

@@ -62,7 +62,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
 	if (*phys_limit <= DMA_BIT_MASK(32) &&
-	    !zone_dma32_is_empty(dev_to_node(dev)))
+	    !zone_dma32_are_empty())
 		return GFP_DMA32;
 	return 0;
 }
@@ -103,7 +103,7 @@ again:
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 		    phys_limit < DMA_BIT_MASK(64) &&
 		    !(gfp & (GFP_DMA32 | GFP_DMA)) &&
-		    !zone_dma32_is_empty(node)) {
+		    !zone_dma32_are_empty()) {
 			gfp |= GFP_DMA32;
 			goto again;
 		}

View File

@@ -2249,6 +2249,17 @@ static __latent_entropy struct task_struct *copy_process(
 	if (retval)
 		goto bad_fork_put_pidfd;
 
+	/*
+	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
+	 * the new task on the correct runqueue. All this *before* the task
+	 * becomes visible.
+	 *
+	 * This isn't part of ->can_fork() because while the re-cloning is
+	 * cgroup specific, it unconditionally needs to place the task on a
+	 * runqueue.
+	 */
+	sched_cgroup_fork(p, args);
+
 	/*
 	 * From this point on we must avoid any synchronous user-space
 	 * communication until we take the tasklist-lock. In particular, we do
@@ -2356,7 +2367,7 @@ static __latent_entropy struct task_struct *copy_process(
 	fd_install(pidfd, pidfile);
 
 	proc_fork_connector(p);
-	sched_post_fork(p, args);
+	sched_post_fork(p);
 	cgroup_post_fork(p, args);
 	perf_event_fork(p);

View File

@@ -880,9 +880,8 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p)
+static void set_load_weight(struct task_struct *p, bool update_load)
 {
-	bool update_load = !(READ_ONCE(p->state) & TASK_NEW);
 	int prio = p->static_prio - MAX_RT_PRIO;
 	struct load_weight *load = &p->se.load;
 
@@ -3485,7 +3484,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->static_prio = NICE_TO_PRIO(0);
 
 	p->prio = p->normal_prio = p->static_prio;
-	set_load_weight(p);
+	set_load_weight(p, false);
 
 	/*
 	 * We don't need the reset flag anymore after the fork. It has
@@ -3504,6 +3503,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	init_entity_runnable_average(&p->se);
 	trace_android_rvh_finish_prio_fork(p);
 
+
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -3519,18 +3519,24 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	return 0;
 }
 
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
 	unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
-	struct task_group *tg;
-#endif
 
+	/*
+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+	 * required yet, but lockdep gets upset if rules are violated.
+	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_CGROUP_SCHED
+	if (1) {
+		struct task_group *tg;
 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
 				  struct task_group, css);
-	p->sched_task_group = autogroup_task_group(p, tg);
+		tg = autogroup_task_group(p, tg);
+		p->sched_task_group = tg;
+	}
 #endif
 	rseq_migrate(p);
 	/*
@@ -3541,7 +3547,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
 
+void sched_post_fork(struct task_struct *p)
+{
 	uclamp_post_fork(p);
 }
 
@@ -5253,7 +5262,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		put_prev_task(rq, p);
 
 	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p);
+	set_load_weight(p, true);
 	old_prio = p->prio;
 	p->prio = effective_prio(p);
 
@@ -5427,7 +5436,7 @@ static void __setscheduler_params(struct task_struct *p,
 	 */
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
-	set_load_weight(p);
+	set_load_weight(p, true);
 }
 
 /*
@@ -7570,7 +7579,7 @@ void __init sched_init(void)
 		atomic_set(&rq->nr_iowait, 0);
 	}
 
-	set_load_weight(&init_task);
+	set_load_weight(&init_task, false);
 
 	/*
 	 * The boot idle thread does lazy MMU switching as well:

View File

@@ -5033,11 +5033,15 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		goto out_walk;
 
 	p4d = p4d_offset(pgd, address);
+	if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
+		goto out_walk;
 	p4dval = READ_ONCE(*p4d);
 	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
 		goto out_walk;
 
 	vmf.pud = pud_offset(p4d, address);
+	if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
+		goto out_walk;
 	pudval = READ_ONCE(*vmf.pud);
 	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
 		goto out_walk;
@@ -5047,6 +5051,8 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		goto out_walk;
 
 	vmf.pmd = pmd_offset(vmf.pud, address);
+	if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
+		goto out_walk;
 	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
 	/*
 	 * pmd_none could mean that a hugepage collapse is in progress
@@ -5074,6 +5080,11 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 	 */
 	vmf.pte = pte_offset_map(vmf.pmd, address);
+	if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
+		pte_unmap(vmf.pte);
+		vmf.pte = NULL;
+		goto out_walk;
+	}
 	vmf.orig_pte = READ_ONCE(*vmf.pte);
 	barrier(); /* See comment in handle_pte_fault() */
 	if (pte_none(vmf.orig_pte)) {

View File

@@ -105,7 +105,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	/* Driver shouldn't use PG_isolated bit of page->flags */
 	WARN_ON_ONCE(PageIsolated(page));
-	__SetPageIsolated(page);
+	SetPageIsolated(page);
 	unlock_page(page);
 
 	return 0;
@@ -129,7 +129,7 @@ void putback_movable_page(struct page *page)
 	mapping = page_mapping(page);
 	mapping->a_ops->putback_page(page);
-	__ClearPageIsolated(page);
+	ClearPageIsolated(page);
 }
 
 /*
@@ -162,7 +162,7 @@ void putback_movable_pages(struct list_head *l)
 			if (PageMovable(page))
 				putback_movable_page(page);
 			else
-				__ClearPageIsolated(page);
+				ClearPageIsolated(page);
 			unlock_page(page);
 			put_page(page);
 		} else {
@@ -952,7 +952,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
 		if (!PageMovable(page)) {
 			rc = MIGRATEPAGE_SUCCESS;
-			__ClearPageIsolated(page);
+			ClearPageIsolated(page);
 			goto out;
 		}
@@ -974,7 +974,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 * We clear PG_movable under page_lock so any compactor
 		 * cannot try to migrate this page.
 		 */
-		__ClearPageIsolated(page);
+		ClearPageIsolated(page);
 	}
 
 	/*
@@ -1160,7 +1160,7 @@ static int unmap_and_move(new_page_t get_new_page,
 		if (unlikely(__PageMovable(page))) {
 			lock_page(page);
 			if (!PageMovable(page))
-				__ClearPageIsolated(page);
+				ClearPageIsolated(page);
 			unlock_page(page);
 		}
 		goto out;
@@ -1215,7 +1215,7 @@ out:
 		if (PageMovable(page))
 			putback_movable_page(page);
 		else
-			__ClearPageIsolated(page);
+			ClearPageIsolated(page);
 		unlock_page(page);
 		put_page(page);
 	}

View File

@@ -4478,13 +4478,12 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 					const struct alloc_context *ac)
 {
 	unsigned int noreclaim_flag;
-	unsigned long pflags, progress;
+	unsigned long progress;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	psi_memstall_enter(&pflags);
 	fs_reclaim_acquire(gfp_mask);
 	noreclaim_flag = memalloc_noreclaim_save();
 
@@ -4493,7 +4492,6 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	memalloc_noreclaim_restore(noreclaim_flag);
 	fs_reclaim_release(gfp_mask);
-	psi_memstall_leave(&pflags);
 
 	cond_resched();
 
@@ -4507,11 +4505,14 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 					unsigned long *did_some_progress)
 {
 	struct page *page = NULL;
+	unsigned long pflags;
 	bool drained = false;
+	bool skip_pcp_drain = false;
 
+	psi_memstall_enter(&pflags);
 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
 	if (unlikely(!(*did_some_progress)))
-		return NULL;
+		goto out;
 
 retry:
 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
@@ -4523,10 +4524,15 @@ retry:
 	 */
 	if (!page && !drained) {
 		unreserve_highatomic_pageblock(ac, false);
+		trace_android_vh_drain_all_pages_bypass(gfp_mask, order,
+			alloc_flags, ac->migratetype, *did_some_progress, &skip_pcp_drain);
+		if (!skip_pcp_drain)
 			drain_all_pages(NULL);
 		drained = true;
 		goto retry;
 	}
+out:
+	psi_memstall_leave(&pflags);
 
 	return page;
 }

View File

@@ -328,7 +328,6 @@ void __dump_page_pinner(struct page *page)
 void __page_pinner_migration_failed(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
-	struct page_pinner *page_pinner;
 	struct captured_pinner record;
 	unsigned long flags;
 	unsigned int idx;
@@ -336,7 +335,6 @@ void __page_pinner_migration_failed(struct page *page)
 	if (unlikely(!page_ext))
 		return;
 
-	page_pinner = get_page_pinner(page_ext);
 	if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags))
 		return;

View File

@@ -599,7 +599,9 @@ unsigned long get_each_object_track(struct kmem_cache *s,
 	slab_lock(page);
 	for_each_object(p, s, page_address(page), page->objects) {
 		t = get_track(s, p, alloc);
+		metadata_access_enable();
 		ret = fn(s, p, t, private);
+		metadata_access_disable();
 		if (ret < 0)
 			break;
 		num_track += 1;

View File

@@ -2083,6 +2083,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	unsigned nr_rotated = 0;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+	bool bypass = false;
 
 	lru_add_drain();
 
@@ -2117,6 +2118,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			}
 		}
 
+		trace_android_vh_page_referenced_check_bypass(page, nr_to_scan, lru, &bypass);
+		if (bypass)
+			goto skip_page_referenced;
+
 		if (page_referenced(page, 0, sc->target_mem_cgroup,
 				    &vm_flags)) {
 			/*
@@ -2134,7 +2139,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 				continue;
 			}
 		}
-
+skip_page_referenced:
 		ClearPageActive(page);	/* we are de-activating */
 		SetPageWorkingset(page);
 		list_add(&page->lru, &l_inactive);